// concurrentMarkSweepGeneration.cpp revision 8528:01d947f8d411
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it was holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
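
// Illustrative usage sketch (not part of this file's logic): a CMS-thread
// phase that must hold the CMS token plus one or more mutexes could be
// bracketed as below; "bitMapLock()" here stands in for whichever lock the
// phase actually needs. The destructors release the locks in reverse
// order and then relinquish the token:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... phase work, safe from VM-thread interference ...
//   }  // locks, then token, released here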


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
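
// Worked example (illustrative values, not defaults asserted by this
// file): with MinHeapFreeRatio f = 40, CMSTriggerRatio tr = 80, and
// CMSInitiatingOccupancyFraction left unset (io < 0), the else-branch
// above computes
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a new cycle is initiated once the generation is 92% occupied.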

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
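
// Worked example (illustrative figures): if free space is 120 units,
// the expected promotion is 20 units and CMSIncrementalSafetyFactor is
// 10, then cms_free = 120 - 20 = 100, scaled by (100 - 10)/100 to 90.
// With cms_consumption_rate() = 9 units/sec, the estimate returned is
//   90 / (9 + 1) = 9 seconds until the generation is full.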

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) could be used instead.
// This has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}
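
// Example continuing the figures above (illustrative): with
// cms_duration() = 4 s and gc0_period() = 1 s, work = 5 s. Against a
// deadline of 9 s the "work > deadline" test fails, a non-zero value is
// returned, and the caller (shouldConcurrentCollect()) does not start a
// cycle; once the deadline estimate drops to 5 s or less, 0.0 is
// returned and a cycle is initiated.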

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = 2*max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

size_t CMSCollector::plab_sample_minimum_size() {
  // The default value of MinTLABSize is 2k, but there is
  // no way to get the default value if the flag has been overridden.
  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}

void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // the uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      "max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->young_gen();
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size " SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object,
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object,
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
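
// Worked example for the marks set above (illustrative): for an
// uninitialized object of size 4 HeapWords directly allocated at
// address "start", the bits set in the mark bit map are
//   bit(start)         -- the object is live,
//   bit(start + 1)     -- the object may be uninitialized, and
//   bit(start + 3)     -- i.e. start + size - 1, the end of the object,
// letting marking, precleaning and sweeping skip the unparsable body
// until the klass word is eventually installed.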

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states: a free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// or an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
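
// Illustrative sketch (hypothetical helper, not code used by the
// collector): the discrimination described above for the 32-bit and
// 64-bit-without-COOPS layouts could be written roughly as follows,
// where klass_word() and mark_word() stand in for the raw header
// accessors of a block starting at "p":
//
//   if (p->klass_word() & 1) {          // FREE: low bit of klass word set
//     size = p->mark_word();            // mark word holds the block size
//   } else if (p->klass_word() == 0) {  // TRANSIENT: size indeterminate
//     // come back later, or consult the P-bits when available
//   } else {                            // OBJECT: fully initialized
//     size = oop(p)->size();
//   }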

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}
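
// Note on the ordering above (summary, not additional logic): the mark
// word, the klass gap and the body are all written before the final
// set_klass(), with OrderAccess::storestore() barriers in between, so a
// reader that observes a non-NULL klass word (with appropriate
// read-side ordering) will also see the copied contents; until then the
// block reads as TRANSIENT per the state notes preceding par_promote().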
1138
1139void
1140ConcurrentMarkSweepGeneration::
1141par_promote_alloc_done(int thread_num) {
1142  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1143  ps->lab.retire(thread_num);
1144}
1145
1146void
1147ConcurrentMarkSweepGeneration::
1148par_oop_since_save_marks_iterate_done(int thread_num) {
1149  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1150  ParScanWithoutBarrierClosure* dummy_cl = NULL;
1151  ps->promo.promoted_oops_iterate_nv(dummy_cl);
1152}
1153
1154bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1155                                                   size_t size,
1156                                                   bool   tlab)
1157{
1158  // We allow a STW collection only if a full
1159  // collection was requested.
1160  return full || should_allocate(size, tlab); // FIX ME !!!
1161  // This and promotion failure handling are connected at the
1162  // hip and should be fixed by untying them.
1163}
1164
1165bool CMSCollector::shouldConcurrentCollect() {
1166  if (_full_gc_requested) {
1167    if (Verbose && PrintGCDetails) {
1168      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1169                             " gc request (or gc_locker)");
1170    }
1171    return true;
1172  }
1173
1174  FreelistLocker x(this);
1175  // ------------------------------------------------------------------
1176  // Print out lots of information which affects the initiation of
1177  // a collection.
1178  if (PrintCMSInitiationStatistics && stats().valid()) {
1179    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1180    gclog_or_tty->stamp();
1181    gclog_or_tty->cr();
1182    stats().print_on(gclog_or_tty);
1183    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1184      stats().time_until_cms_gen_full());
1185    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1186    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1187                           _cmsGen->contiguous_available());
1188    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1189    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1190    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1191    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1192    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1193    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1194    gclog_or_tty->print_cr("metadata initialized %d",
1195      MetaspaceGC::should_concurrent_collect());
1196  }
1197  // ------------------------------------------------------------------
1198
1199  // If the estimated time to complete a cms collection (cms_duration())
1200  // is less than the estimated time remaining until the cms generation
1201  // is full, start a collection.
1202  if (!UseCMSInitiatingOccupancyOnly) {
1203    if (stats().valid()) {
1204      if (stats().time_until_cms_start() == 0.0) {
1205        return true;
1206      }
1207    } else {
1208      // We want to conservatively collect somewhat early in order
1209      // to try and "bootstrap" our CMS/promotion statistics;
1210      // this branch will not fire after the first successful CMS
1211      // collection because the stats should then be valid.
1212      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1213        if (Verbose && PrintGCDetails) {
1214          gclog_or_tty->print_cr(
1215            " CMSCollector: collect for bootstrapping statistics:"
1216            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1217            _bootstrap_occupancy);
1218        }
1219        return true;
1220      }
1221    }
1222  }
1223
1224  // Otherwise, we start a collection cycle if
1225  // old gen want a collection cycle started. Each may use
1226  // an appropriate criterion for making this decision.
1227  // XXX We need to make sure that the gen expansion
1228  // criterion dovetails well with this. XXX NEED TO FIX THIS
1229  if (_cmsGen->should_concurrent_collect()) {
1230    if (Verbose && PrintGCDetails) {
1231      gclog_or_tty->print_cr("CMS old gen initiated");
1232    }
1233    return true;
1234  }
1235
1236  // We start a collection if we believe an incremental collection may fail;
1237  // this is not likely to be productive in practice because it's probably too
1238  // late anyway.
1239  GenCollectedHeap* gch = GenCollectedHeap::heap();
1240  assert(gch->collector_policy()->is_generation_policy(),
1241         "You may want to check the correctness of the following");
1242  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1243    if (Verbose && PrintGCDetails) {
1244      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1245    }
1246    return true;
1247  }
1248
1249  if (MetaspaceGC::should_concurrent_collect()) {
1250    if (Verbose && PrintGCDetails) {
1251      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1252    }
1253    return true;
1254  }
1255
1256  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1257  if (CMSTriggerInterval >= 0) {
1258    if (CMSTriggerInterval == 0) {
1259      // Trigger always
1260      return true;
1261    }
1262
1263    // Check the CMS time since begin (we do not check the stats validity
1264    // as we want to be able to trigger the first CMS cycle as well)
1265    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1266      if (Verbose && PrintGCDetails) {
1267        if (stats().valid()) {
1268          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1269                                 stats().cms_time_since_begin());
1270        } else {
1271          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1272        }
1273      }
1274      return true;
1275    }
1276  }
1277
1278  return false;
1279}
1280
1281void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1282
1283// Clear _expansion_cause fields of constituent generations
1284void CMSCollector::clear_expansion_cause() {
1285  _cmsGen->clear_expansion_cause();
1286}
1287
1288// We should be conservative in starting a collection cycle.  To
1289// start too eagerly runs the risk of collecting too often in the
1290// extreme.  To collect too rarely falls back on full collections,
1291// which works, even if not optimum in terms of concurrent work.
1292// As a work around for too eagerly collecting, use the flag
1293// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1294// giving the user an easily understandable way of controlling the
1295// collections.
1296// We want to start a new collection cycle if any of the following
1297// conditions hold:
1298// . our current occupancy exceeds the configured initiating occupancy
1299//   for this generation, or
1300// . we recently needed to expand this space and have not, since that
1301//   expansion, done a collection of this generation, or
1302// . the underlying space believes that it may be a good idea to initiate
1303//   a concurrent collection (this may be based on criteria such as the
1304//   following: the space uses linear allocation and linear allocation is
1305//   going to fail, or there is believed to be excessive fragmentation in
1306//   the generation, etc... or ...
1307// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1308//   the case of the old generation; see CR 6543076):
1309//   we may be approaching a point at which allocation requests may fail because
1310//   we will be out of sufficient free space given allocation rate estimates.]
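// For example (the settings are illustrative, not defaults): running with
//   -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly
// restricts the decision below to the first criterion alone, so a concurrent
// cycle starts only once this generation's occupancy exceeds 70%.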
1311bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1312
1313  assert_lock_strong(freelistLock());
1314  if (occupancy() > initiating_occupancy()) {
1315    if (PrintGCDetails && Verbose) {
1316      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1317        short_name(), occupancy(), initiating_occupancy());
1318    }
1319    return true;
1320  }
1321  if (UseCMSInitiatingOccupancyOnly) {
1322    return false;
1323  }
1324  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1325    if (PrintGCDetails && Verbose) {
1326      gclog_or_tty->print(" %s: collect because expanded for allocation ",
1327        short_name());
1328    }
1329    return true;
1330  }
1331  if (_cmsSpace->should_concurrent_collect()) {
1332    if (PrintGCDetails && Verbose) {
1333      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1334        short_name());
1335    }
1336    return true;
1337  }
1338  return false;
1339}
1340
1341void ConcurrentMarkSweepGeneration::collect(bool   full,
1342                                            bool   clear_all_soft_refs,
1343                                            size_t size,
1344                                            bool   tlab)
1345{
1346  collector()->collect(full, clear_all_soft_refs, size, tlab);
1347}
1348
1349void CMSCollector::collect(bool   full,
1350                           bool   clear_all_soft_refs,
1351                           size_t size,
1352                           bool   tlab)
1353{
1354  // The following "if" branch is present for defensive reasons.
1355  // In the current uses of this interface, it can be replaced with:
1356  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1357  // But I am not placing that assert here to allow future
1358  // generality in invoking this interface.
1359  if (GC_locker::is_active()) {
1360    // A consistency test for GC_locker
1361    assert(GC_locker::needs_gc(), "Should have been set already");
1362    // Skip this foreground collection, instead
1363    // expanding the heap if necessary.
1364    // Need the free list locks for the call to free() in compute_new_size()
1365    compute_new_size();
1366    return;
1367  }
1368  acquire_control_and_collect(full, clear_all_soft_refs);
1369}
1370
1371void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1372  GenCollectedHeap* gch = GenCollectedHeap::heap();
1373  unsigned int gc_count = gch->total_full_collections();
1374  if (gc_count == full_gc_count) {
1375    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1376    _full_gc_requested = true;
1377    _full_gc_cause = cause;
1378    CGC_lock->notify();   // nudge CMS thread
1379  } else {
1380    assert(gc_count > full_gc_count, "Error: causal loop");
1381  }
1382}
1383
1384bool CMSCollector::is_external_interruption() {
1385  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1386  return GCCause::is_user_requested_gc(cause) ||
1387         GCCause::is_serviceability_requested_gc(cause);
1388}
1389
1390void CMSCollector::report_concurrent_mode_interruption() {
1391  if (is_external_interruption()) {
1392    if (PrintGCDetails) {
1393      gclog_or_tty->print(" (concurrent mode interrupted)");
1394    }
1395  } else {
1396    if (PrintGCDetails) {
1397      gclog_or_tty->print(" (concurrent mode failure)");
1398    }
1399    _gc_tracer_cm->report_concurrent_mode_failure();
1400  }
1401}
1402
1403
1404// The foreground and background collectors need to coordinate in order
1405// to make sure that they do not mutually interfere with CMS collections.
1406// When a background collection is active,
1407// the foreground collector may need to take over (preempt) and
1408// synchronously complete an ongoing collection. Depending on the
1409// frequency of the background collections and the heap usage
1410// of the application, this preemption can be infrequent or frequent.
1411// There are only certain
1412// points in the background collection at which the "collection-baton"
1413// can be passed to the foreground collector.
1414//
1415// The foreground collector will wait for the baton before
1416// starting any part of the collection.  The foreground collector
1417// will only wait at one location.
1418//
1419// The background collector will yield the baton before starting a new
1420// phase of the collection (e.g., before initial marking, marking from roots,
1421// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1422// of the loop which switches the phases. The background collector does some
1423// of the phases (initial mark, final re-mark) with the world stopped.
1424// Because of locking involved in stopping the world,
1425// the foreground collector should not block waiting for the background
1426// collector when it is doing a stop-the-world phase.  The background
1427// collector will yield the baton at an additional point just before
1428// it enters a stop-the-world phase.  Once the world is stopped, the
1429// background collector checks the phase of the collection.  If the
1430// phase has not changed, it proceeds with the collection.  If the
1431// phase has changed, it skips that phase of the collection.  See
1432// the comments on the use of the Heap_lock in collect_in_background().
1433//
1434// Variable used in baton passing.
1435//   _foregroundGCIsActive - Set to true by the foreground collector when
1436//      it wants the baton.  The foreground clears it when it has finished
1437//      the collection.
1438//   _foregroundGCShouldWait - Set to true by the background collector
1439//      when it is running.  The foreground collector waits while
1440//      _foregroundGCShouldWait is true.
1441//  CGC_lock - monitor used to protect access to the above variables
1442//      and to notify the foreground and background collectors.
1443//  _collectorState - current state of the CMS collection.
1444//
1445// The foreground collector
1446//   acquires the CGC_lock
1447//   sets _foregroundGCIsActive
1448//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1449//     various locks acquired in preparation for the collection
1450//     are released so as not to block the background collector
1451//     that is in the midst of a collection
1452//   proceeds with the collection
1453//   clears _foregroundGCIsActive
1454//   returns
1455//
1456// The background collector in a loop iterating on the phases of the
1457//      collection
1458//   acquires the CGC_lock
1459//   sets _foregroundGCShouldWait
1460//   if _foregroundGCIsActive is set
1461//     clears _foregroundGCShouldWait, notifies CGC_lock
1462//     waits on CGC_lock for _foregroundGCIsActive to become false
1463//     and exits the loop.
1464//   otherwise
1465//     proceed with that phase of the collection
1466//     if the phase is a stop-the-world phase,
1467//       yield the baton once more just before enqueueing
1468//       the stop-world CMS operation (executed by the VM thread).
1469//   returns after all phases of the collection are done
1470//
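//
// A minimal sketch of the baton handshake itself (illustrative only; the
// code below additionally manages the CMS token and the free list and
// bitmap locks):
//
//   // Foreground (VM thread), with CGC_lock held:
//   _foregroundGCIsActive = true;
//   while (_foregroundGCShouldWait) {
//     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//   }
//   ... do the foreground collection ...
//   _foregroundGCIsActive = false;
//
//   // Background (CMS thread), with CGC_lock held:
//   _foregroundGCShouldWait = true;
//   if (_foregroundGCIsActive) {
//     _foregroundGCShouldWait = false;
//     CGC_lock->notify();            // unblock a waiting foreground collector
//     while (_foregroundGCIsActive) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }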
1471
1472void CMSCollector::acquire_control_and_collect(bool full,
1473        bool clear_all_soft_refs) {
1474  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1475  assert(!Thread::current()->is_ConcurrentGC_thread(),
1476         "shouldn't try to acquire control from self!");
1477
1478  // Start the protocol for acquiring control of the
1479  // collection from the background collector (aka CMS thread).
1480  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1481         "VM thread should have CMS token");
1482  // Remember the possibly interrupted state of an ongoing
1483  // concurrent collection
1484  CollectorState first_state = _collectorState;
1485
1486  // Signal to a possibly ongoing concurrent collection that
1487  // we want to do a foreground collection.
1488  _foregroundGCIsActive = true;
1489
1490  // Release locks and wait for a notify from the background collector;
1491  // releasing the locks is only necessary for phases which
1492  // yield, to improve the granularity of the collection.
1493  assert_lock_strong(bitMapLock());
1494  // We need to lock the Free list lock for the space that we are
1495  // currently collecting.
1496  assert(haveFreelistLocks(), "Must be holding free list locks");
1497  bitMapLock()->unlock();
1498  releaseFreelistLocks();
1499  {
1500    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1501    if (_foregroundGCShouldWait) {
1502      // We are going to be waiting for action from the CMS thread;
1503      // it had better not be gone (for instance at shutdown)!
1504      assert(ConcurrentMarkSweepThread::cmst() != NULL,
1505             "CMS thread must be running");
1506      // Wait here until the background collector gives us the go-ahead
1507      ConcurrentMarkSweepThread::clear_CMS_flag(
1508        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1509      // Get a possibly blocked CMS thread going:
1510      //   Note that we set _foregroundGCIsActive true above,
1511      //   without protection of the CGC_lock.
1512      CGC_lock->notify();
1513      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1514             "Possible deadlock");
1515      while (_foregroundGCShouldWait) {
1516        // wait for notification
1517        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1518        // Possibility of delay/starvation here, since the CMS token does
1519        // not know to give priority to the VM thread? Actually, I think
1520        // there wouldn't be any delay/starvation, but the proof of
1521        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1522      }
1523      ConcurrentMarkSweepThread::set_CMS_flag(
1524        ConcurrentMarkSweepThread::CMS_vm_has_token);
1525    }
1526  }
1527  // The CMS_token is already held.  Get back the other locks.
1528  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1529         "VM thread should have CMS token");
1530  getFreelistLocks();
1531  bitMapLock()->lock_without_safepoint_check();
1532  if (TraceCMSState) {
1533    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1534      INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1535    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1536  }
1537
1538  // Inform cms gen if this was due to a partial collection failing.
1539  // The CMS gen may use this fact to determine its expansion policy.
1540  GenCollectedHeap* gch = GenCollectedHeap::heap();
1541  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1542    assert(!_cmsGen->incremental_collection_failed(),
1543           "Should have been noticed, reacted to and cleared");
1544    _cmsGen->set_incremental_collection_failed();
1545  }
1546
1547  if (first_state > Idling) {
1548    report_concurrent_mode_interruption();
1549  }
1550
1551  set_did_compact(true);
1552
1553  // If the collection is being acquired from the background
1554  // collector, there may be references on the discovered
1555  // references lists.  Abandon those references, since some
1556  // of them may have become unreachable after concurrent
1557  // discovery; the STW compacting collector will redo discovery
1558  // more precisely, without being subject to floating garbage.
1559  // Leaving otherwise unreachable references in the discovered
1560  // lists would require special handling.
1561  ref_processor()->disable_discovery();
1562  ref_processor()->abandon_partial_discovery();
1563  ref_processor()->verify_no_references_recorded();
1564
1565  if (first_state > Idling) {
1566    save_heap_summary();
1567  }
1568
1569  do_compaction_work(clear_all_soft_refs);
1570
1571  // Has the GC time limit been exceeded?
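  // (max_eden_size below is the young gen's maximum capacity less the two
  // survivor spaces, i.e. the largest size eden itself can reach.)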
1572  size_t max_eden_size = _young_gen->max_capacity() -
1573                         _young_gen->to()->capacity() -
1574                         _young_gen->from()->capacity();
1575  GCCause::Cause gc_cause = gch->gc_cause();
1576  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1577                                         _young_gen->eden()->used(),
1578                                         _cmsGen->max_capacity(),
1579                                         max_eden_size,
1580                                         full,
1581                                         gc_cause,
1582                                         gch->collector_policy());
1583
1584  // Reset the expansion cause, now that we just completed
1585  // a collection cycle.
1586  clear_expansion_cause();
1587  _foregroundGCIsActive = false;
1588  return;
1589}
1590
1591// Resize the tenured generation
1592// after obtaining the free list locks for the
1593// two generations.
1594void CMSCollector::compute_new_size() {
1595  assert_locked_or_safepoint(Heap_lock);
1596  FreelistLocker z(this);
1597  MetaspaceGC::compute_new_size();
1598  _cmsGen->compute_new_size_free_list();
1599}
1600
1601// A work method used by the foreground collector to do
1602// a mark-sweep-compact.
1603void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1604  GenCollectedHeap* gch = GenCollectedHeap::heap();
1605
1606  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1607  gc_timer->register_gc_start();
1608
1609  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1610  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1611
1612  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1613
1614  // Temporarily widen the span of the weak reference processing to
1615  // the entire heap.
1616  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1617  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1618  // Temporarily, clear the "is_alive_non_header" field of the
1619  // reference processor.
1620  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1621  // Temporarily make reference _processing_ single threaded (non-MT).
1622  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1623  // Temporarily make refs discovery atomic
1624  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1625  // Temporarily make reference _discovery_ single threaded (non-MT)
1626  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1627
1628  ref_processor()->set_enqueuing_is_done(false);
1629  ref_processor()->enable_discovery();
1630  ref_processor()->setup_policy(clear_all_soft_refs);
1631  // If an asynchronous collection finishes, the _modUnionTable is
1632  // all clear.  If we are taking over the collection from an asynchronous
1633  // collection, clear the _modUnionTable.
1634  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1635    "_modUnionTable should be clear if the baton was not passed");
1636  _modUnionTable.clear_all();
1637  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1638    "mod union for klasses should be clear if the baton was not passed");
1639  _ct->klass_rem_set()->clear_mod_union();
1640
1641  // We must adjust the allocation statistics being maintained
1642  // in the free list space. We do so by reading and clearing
1643  // the sweep timer and updating the block flux rate estimates below.
1644  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1645  if (_inter_sweep_timer.is_active()) {
1646    _inter_sweep_timer.stop();
1647    // Note that we do not use this sample to update the _inter_sweep_estimate.
1648    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1649                                            _inter_sweep_estimate.padded_average(),
1650                                            _intra_sweep_estimate.padded_average());
1651  }
1652
1653  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1654    ref_processor(), clear_all_soft_refs);
1655  #ifdef ASSERT
1656    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1657    size_t free_size = cms_space->free();
1658    assert(free_size ==
1659           pointer_delta(cms_space->end(), cms_space->compaction_top())
1660           * HeapWordSize,
1661      "All the free space should be compacted into one chunk at top");
1662    assert(cms_space->dictionary()->total_chunk_size(
1663                                      debug_only(cms_space->freelistLock())) == 0 ||
1664           cms_space->totalSizeInIndexedFreeLists() == 0,
1665      "All the free space should be in a single chunk");
1666    size_t num = cms_space->totalCount();
1667    assert((free_size == 0 && num == 0) ||
1668           (free_size > 0  && (num == 1 || num == 2)),
1669         "There should be at most 2 free chunks after compaction");
1670  #endif // ASSERT
1671  _collectorState = Resetting;
1672  assert(_restart_addr == NULL,
1673         "Should have been NULL'd before baton was passed");
1674  reset(false /* == !concurrent */);
1675  _cmsGen->reset_after_compaction();
1676  _concurrent_cycles_since_last_unload = 0;
1677
1678  // Clear any data recorded in the PLAB chunk arrays.
1679  if (_survivor_plab_array != NULL) {
1680    reset_survivor_plab_arrays();
1681  }
1682
1683  // Adjust the per-size allocation stats for the next epoch.
1684  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1685  // Restart the "inter sweep timer" for the next epoch.
1686  _inter_sweep_timer.reset();
1687  _inter_sweep_timer.start();
1688
1689  gc_timer->register_gc_end();
1690
1691  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1692
1693  // For a mark-sweep-compact, compute_new_size() will be called
1694  // in the heap's do_collection() method.
1695}
1696
1697void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1698  ContiguousSpace* eden_space = _young_gen->eden();
1699  ContiguousSpace* from_space = _young_gen->from();
1700  ContiguousSpace* to_space   = _young_gen->to();
1701  // Eden
1702  if (_eden_chunk_array != NULL) {
1703    gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1704                           p2i(eden_space->bottom()), p2i(eden_space->top()),
1705                           p2i(eden_space->end()), eden_space->capacity());
1706    gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1707                           "_eden_chunk_capacity=" SIZE_FORMAT,
1708                           _eden_chunk_index, _eden_chunk_capacity);
1709    for (size_t i = 0; i < _eden_chunk_index; i++) {
1710      gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1711                             i, p2i(_eden_chunk_array[i]));
1712    }
1713  }
1714  // Survivor
1715  if (_survivor_chunk_array != NULL) {
1716    gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1717                           p2i(from_space->bottom()), p2i(from_space->top()),
1718                           p2i(from_space->end()), from_space->capacity());
1719    gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1720                           "_survivor_chunk_capacity=" SIZE_FORMAT,
1721                           _survivor_chunk_index, _survivor_chunk_capacity);
1722    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1723      gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1724                             i, p2i(_survivor_chunk_array[i]));
1725    }
1726  }
1727}
1728
1729void CMSCollector::getFreelistLocks() const {
1730  // Get locks for all free lists in all generations that this
1731  // collector is responsible for
1732  _cmsGen->freelistLock()->lock_without_safepoint_check();
1733}
1734
1735void CMSCollector::releaseFreelistLocks() const {
1736  // Release locks for all free lists in all generations that this
1737  // collector is responsible for
1738  _cmsGen->freelistLock()->unlock();
1739}
1740
1741bool CMSCollector::haveFreelistLocks() const {
1742  // Check locks for all free lists in all generations that this
1743  // collector is responsible for
1744  assert_lock_strong(_cmsGen->freelistLock());
1745  PRODUCT_ONLY(ShouldNotReachHere());
1746  return true;
1747}
1748
1749// A utility class that is used by the CMS collector to
1750// temporarily "release" the foreground collector from its
1751// usual obligation to wait for the background collector to
1752// complete an ongoing phase before proceeding.
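//
// Typical use, sketched from collect_in_background() below:
//
//   {
//     ReleaseForegroundGC x(this);   // baton released for this scope
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }   // destructor makes the foreground collector wait again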
1753class ReleaseForegroundGC: public StackObj {
1754 private:
1755  CMSCollector* _c;
1756 public:
1757  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1758    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1759    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1760    // allow a potentially blocked foreground collector to proceed
1761    _c->_foregroundGCShouldWait = false;
1762    if (_c->_foregroundGCIsActive) {
1763      CGC_lock->notify();
1764    }
1765    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1766           "Possible deadlock");
1767  }
1768
1769  ~ReleaseForegroundGC() {
1770    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1771    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1772    _c->_foregroundGCShouldWait = true;
1773  }
1774};
1775
1776void CMSCollector::collect_in_background(GCCause::Cause cause) {
1777  assert(Thread::current()->is_ConcurrentGC_thread(),
1778    "A CMS asynchronous collection is only allowed on a CMS thread.");
1779
1780  GenCollectedHeap* gch = GenCollectedHeap::heap();
1781  {
1782    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1783    MutexLockerEx hl(Heap_lock, safepoint_check);
1784    FreelistLocker fll(this);
1785    MutexLockerEx x(CGC_lock, safepoint_check);
1786    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1787      // The foreground collector is active or we're
1788      // not using asynchronous collections.  Skip this
1789      // background collection.
1790      assert(!_foregroundGCShouldWait, "Should be clear");
1791      return;
1792    } else {
1793      assert(_collectorState == Idling, "Should be idling before start.");
1794      _collectorState = InitialMarking;
1795      register_gc_start(cause);
1796      // Reset the expansion cause, now that we are about to begin
1797      // a new cycle.
1798      clear_expansion_cause();
1799
1800      // Clear the MetaspaceGC flag since a concurrent collection
1801      // is starting but also clear it after the collection.
1802      MetaspaceGC::set_should_concurrent_collect(false);
1803    }
1804    // Decide if we want to enable class unloading as part of the
1805    // ensuing concurrent GC cycle.
1806    update_should_unload_classes();
1807    _full_gc_requested = false;           // acks all outstanding full gc requests
1808    _full_gc_cause = GCCause::_no_gc;
1809    // Signal that we are about to start a collection
1810    gch->increment_total_full_collections();  // ... starting a collection cycle
1811    _collection_count_start = gch->total_full_collections();
1812  }
1813
1814  // Used for PrintGC
1815  size_t prev_used = 0;
1816  if (PrintGC && Verbose) {
1817    prev_used = _cmsGen->used();
1818  }
1819
1820  // The change of the collection state is normally done at this level;
1821  // the exceptions are phases that are executed while the world is
1822  // stopped.  For those phases the change of state is done while the
1823  // world is stopped.  For baton passing purposes this allows the
1824  // background collector to finish the phase and change state atomically.
1825  // The foreground collector cannot wait on a phase that is done
1826  // while the world is stopped because the foreground collector already
1827  // has the world stopped and would deadlock.
1828  while (_collectorState != Idling) {
1829    if (TraceCMSState) {
1830      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1831        p2i(Thread::current()), _collectorState);
1832    }
1833    // The foreground collector
1834    //   holds the Heap_lock throughout its collection.
1835    //   holds the CMS token (but not the lock)
1836    //     except while it is waiting for the background collector to yield.
1837    //
1838    // The foreground collector should be blocked (not for long)
1839    //   if the background collector is about to start a phase
1840    //   executed with world stopped.  If the background
1841    //   collector has already started such a phase, the
1842    //   foreground collector is blocked waiting for the
1843    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1844    //   are executed in the VM thread.
1845    //
1846    // The locking order is
1847    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1848    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1849    //   CMS token  (claimed in
1850    //                stop_world_and_do() -->
1851    //                  safepoint_synchronize() -->
1852    //                    CMSThread::synchronize())
1853
1854    {
1855      // Check if the FG collector wants us to yield.
1856      CMSTokenSync x(true); // is cms thread
1857      if (waitForForegroundGC()) {
1858        // We yielded to a foreground GC, nothing more to be
1859        // done this round.
1860        assert(_foregroundGCShouldWait == false, "We set it to false in "
1861               "waitForForegroundGC()");
1862        if (TraceCMSState) {
1863          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1864            " exiting collection CMS state %d",
1865            p2i(Thread::current()), _collectorState);
1866        }
1867        return;
1868      } else {
1869        // The background collector can run but check to see if the
1870        // foreground collector has done a collection while the
1871        // background collector was waiting to get the CGC_lock
1872        // above.  If yes, break so that _foregroundGCShouldWait
1873        // is cleared before returning.
1874        if (_collectorState == Idling) {
1875          break;
1876        }
1877      }
1878    }
1879
1880    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1881      "should be waiting");
1882
1883    switch (_collectorState) {
1884      case InitialMarking:
1885        {
1886          ReleaseForegroundGC x(this);
1887          stats().record_cms_begin();
1888          VM_CMS_Initial_Mark initial_mark_op(this);
1889          VMThread::execute(&initial_mark_op);
1890        }
1891        // The collector state may be any legal state at this point
1892        // since the background collector may have yielded to the
1893        // foreground collector.
1894        break;
1895      case Marking:
1896        // initial marking in checkpointRootsInitialWork has been completed
1897        if (markFromRoots()) { // we were successful
1898          assert(_collectorState == Precleaning, "Collector state should "
1899            "have changed");
1900        } else {
1901          assert(_foregroundGCIsActive, "Internal state inconsistency");
1902        }
1903        break;
1904      case Precleaning:
1905        // marking from roots in markFromRoots has been completed
1906        preclean();
1907        assert(_collectorState == AbortablePreclean ||
1908               _collectorState == FinalMarking,
1909               "Collector state should have changed");
1910        break;
1911      case AbortablePreclean:
1912        abortable_preclean();
1913        assert(_collectorState == FinalMarking, "Collector state should "
1914          "have changed");
1915        break;
1916      case FinalMarking:
1917        {
1918          ReleaseForegroundGC x(this);
1919
1920          VM_CMS_Final_Remark final_remark_op(this);
1921          VMThread::execute(&final_remark_op);
1922        }
1923        assert(_foregroundGCShouldWait, "block post-condition");
1924        break;
1925      case Sweeping:
1926        // final marking in checkpointRootsFinal has been completed
1927        sweep();
1928        assert(_collectorState == Resizing, "Collector state change "
1929          "to Resizing must be done under the free_list_lock");
1930
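        // fall through intentionally: sweep() has already advanced the
        // state to Resizing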
1931      case Resizing: {
1932        // Sweeping has been completed...
1933        // At this point the background collection has completed.
1934        // Don't move the call to compute_new_size() down
1935        // into code that might be executed if the background
1936        // collection was preempted.
1937        {
1938          ReleaseForegroundGC x(this);   // unblock FG collection
1939          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1940          CMSTokenSync        z(true);   // not strictly needed.
1941          if (_collectorState == Resizing) {
1942            compute_new_size();
1943            save_heap_summary();
1944            _collectorState = Resetting;
1945          } else {
1946            assert(_collectorState == Idling, "The state should only change"
1947                   " because the foreground collector has finished the collection");
1948          }
1949        }
1950        break;
1951      }
1952      case Resetting:
1953        // CMS heap resizing has been completed
1954        reset(true);
1955        assert(_collectorState == Idling, "Collector state should "
1956          "have changed");
1957
1958        MetaspaceGC::set_should_concurrent_collect(false);
1959
1960        stats().record_cms_end();
1961        // Don't move the concurrent_phases_end() and compute_new_size()
1962        // calls to here because a preempted background collection
1963        // has its state set to "Resetting".
1964        break;
1965      case Idling:
1966      default:
1967        ShouldNotReachHere();
1968        break;
1969    }
1970    if (TraceCMSState) {
1971      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1972        p2i(Thread::current()), _collectorState);
1973    }
1974    assert(_foregroundGCShouldWait, "block post-condition");
1975  }
1976
1977  // Should this be in gc_epilogue?
1978  collector_policy()->counters()->update_counters();
1979
1980  {
1981    // Clear _foregroundGCShouldWait and, in the event that the
1982    // foreground collector is waiting, notify it, before
1983    // returning.
1984    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1985    _foregroundGCShouldWait = false;
1986    if (_foregroundGCIsActive) {
1987      CGC_lock->notify();
1988    }
1989    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1990           "Possible deadlock");
1991  }
1992  if (TraceCMSState) {
1993    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1994      " exiting collection CMS state %d",
1995      p2i(Thread::current()), _collectorState);
1996  }
1997  if (PrintGC && Verbose) {
1998    _cmsGen->print_heap_change(prev_used);
1999  }
2000}
2001
2002void CMSCollector::register_gc_start(GCCause::Cause cause) {
2003  _cms_start_registered = true;
2004  _gc_timer_cm->register_gc_start();
2005  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2006}
2007
2008void CMSCollector::register_gc_end() {
2009  if (_cms_start_registered) {
2010    report_heap_summary(GCWhen::AfterGC);
2011
2012    _gc_timer_cm->register_gc_end();
2013    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2014    _cms_start_registered = false;
2015  }
2016}
2017
2018void CMSCollector::save_heap_summary() {
2019  GenCollectedHeap* gch = GenCollectedHeap::heap();
2020  _last_heap_summary = gch->create_heap_summary();
2021  _last_metaspace_summary = gch->create_metaspace_summary();
2022}
2023
2024void CMSCollector::report_heap_summary(GCWhen::Type when) {
2025  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2026  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2027}
2028
2029bool CMSCollector::waitForForegroundGC() {
2030  bool res = false;
2031  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2032         "CMS thread should have CMS token");
2033  // Block the foreground collector until the
2034  // background collector decides whether to
2035  // yield.
2036  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2037  _foregroundGCShouldWait = true;
2038  if (_foregroundGCIsActive) {
2039    // The background collector yields to the
2040    // foreground collector and returns a value
2041    // indicating that it has yielded.  The foreground
2042    // collector can proceed.
2043    res = true;
2044    _foregroundGCShouldWait = false;
2045    ConcurrentMarkSweepThread::clear_CMS_flag(
2046      ConcurrentMarkSweepThread::CMS_cms_has_token);
2047    ConcurrentMarkSweepThread::set_CMS_flag(
2048      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2049    // Get a possibly blocked foreground thread going
2050    CGC_lock->notify();
2051    if (TraceCMSState) {
2052      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2053        p2i(Thread::current()), _collectorState);
2054    }
2055    while (_foregroundGCIsActive) {
2056      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2057    }
2058    ConcurrentMarkSweepThread::set_CMS_flag(
2059      ConcurrentMarkSweepThread::CMS_cms_has_token);
2060    ConcurrentMarkSweepThread::clear_CMS_flag(
2061      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2062  }
2063  if (TraceCMSState) {
2064    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2065      p2i(Thread::current()), _collectorState);
2066  }
2067  return res;
2068}
2069
2070// Because of the need to lock the free lists and other structures in
2071// the collector, common to all the generations that the collector is
2072// collecting, we need the gc_prologues of individual CMS generations
2073// to delegate to their collector. It may have been simpler had the
2074// current infrastructure allowed one to call a prologue on a
2075// collector. In the absence of that we have the generation's
2076// prologue delegate to the collector, which delegates back
2077// some "local" work to a worker method in the individual generations
2078// that it's responsible for collecting, while itself doing any
2079// work common to all generations it's responsible for. A similar
2080// comment applies to the gc_epilogue()s.
2081// The role of the variable _between_prologue_and_epilogue is to
2082// enforce the invocation protocol.
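//
// In outline (gc_epilogue is symmetric):
//
//   ConcurrentMarkSweepGeneration::gc_prologue(full)
//     -> CMSCollector::gc_prologue(full)         // common work, done once
//          -> _cmsGen->gc_prologue_work(...)     // per-generation local work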
2083void CMSCollector::gc_prologue(bool full) {
2084  // Call gc_prologue_work() for the CMSGen
2085  // we are responsible for.
2086
2087  // The following locking discipline assumes that we are only called
2088  // when the world is stopped.
2089  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2090
2091  // The CMSCollector prologue must call the gc_prologues for the
2092  // "generations" that it's responsible for.
2094
2095  assert(   Thread::current()->is_VM_thread()
2096         || (   CMSScavengeBeforeRemark
2097             && Thread::current()->is_ConcurrentGC_thread()),
2098         "Incorrect thread type for prologue execution");
2099
2100  if (_between_prologue_and_epilogue) {
2101    // We have already been invoked; this is a gc_prologue delegation
2102    // from yet another CMS generation that we are responsible for, just
2103    // ignore it since all relevant work has already been done.
2104    return;
2105  }
2106
2107  // set a bit saying prologue has been called; cleared in epilogue
2108  _between_prologue_and_epilogue = true;
2109  // Claim locks for common data structures, then call gc_prologue_work()
2110  // for each CMSGen.
2111
2112  getFreelistLocks();   // gets free list locks on constituent spaces
2113  bitMapLock()->lock_without_safepoint_check();
2114
2115  // Should call gc_prologue_work() for all cms gens we are responsible for
2116  bool duringMarking =    _collectorState >= Marking
2117                         && _collectorState < Sweeping;
2118
2119  // The young collections clear the modified oops state, which tells if
2120  // there are any modified oops in the class. The remark phase also needs
2121  // that information. Tell the young collection to save the union of all
2122  // modified klasses.
2123  if (duringMarking) {
2124    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2125  }
2126
2127  bool registerClosure = duringMarking;
2128
2129  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2130
2131  if (!full) {
2132    stats().record_gc0_begin();
2133  }
2134}
2135
2136void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2137
2138  _capacity_at_prologue = capacity();
2139  _used_at_prologue = used();
2140
2141  // Delegate to the CMSCollector, which knows how to coordinate between
2142  // this and any other CMS generations that it is responsible for
2143  // collecting.
2144  collector()->gc_prologue(full);
2145}
2146
2147// This is a "private" interface for use by this generation's CMSCollector.
2148// Not to be called directly by any other entity (for instance,
2149// GenCollectedHeap, which calls the "public" gc_prologue method above).
2150void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2151  bool registerClosure, ModUnionClosure* modUnionClosure) {
2152  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2153  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2154    "Should be NULL");
2155  if (registerClosure) {
2156    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2157  }
2158  cmsSpace()->gc_prologue();
2159  // Clear stat counters
2160  NOT_PRODUCT(
2161    assert(_numObjectsPromoted == 0, "check");
2162    assert(_numWordsPromoted   == 0, "check");
2163    if (Verbose && PrintGC) {
2164      gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2165                          SIZE_FORMAT " bytes concurrently",
2166                          _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2167    }
2168    _numObjectsAllocated = 0;
2169    _numWordsAllocated   = 0;
2170  )
2171}
2172
2173void CMSCollector::gc_epilogue(bool full) {
2174  // The following locking discipline assumes that we are only called
2175  // when the world is stopped.
2176  assert(SafepointSynchronize::is_at_safepoint(),
2177         "world is stopped assumption");
2178
2179  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2180  // if linear allocation blocks need to be appropriately marked to allow
2181  // the blocks to be parsable. We also check here whether we need to nudge the
2182  // CMS collector thread to start a new cycle (if it's not already active).
2183  assert(   Thread::current()->is_VM_thread()
2184         || (   CMSScavengeBeforeRemark
2185             && Thread::current()->is_ConcurrentGC_thread()),
2186         "Incorrect thread type for epilogue execution");
2187
2188  if (!_between_prologue_and_epilogue) {
2189    // We have already been invoked; this is a gc_epilogue delegation
2190    // from yet another CMS generation that we are responsible for, just
2191    // ignore it since all relevant work has already been done.
2192    return;
2193  }
2194  assert(haveFreelistLocks(), "must have freelist locks");
2195  assert_lock_strong(bitMapLock());
2196
2197  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2198
2199  _cmsGen->gc_epilogue_work(full);
2200
2201  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2202    // in case sampling was not already enabled, enable it
2203    _start_sampling = true;
2204  }
2205  // reset _eden_chunk_array so sampling starts afresh
2206  _eden_chunk_index = 0;
2207
2208  size_t cms_used   = _cmsGen->cmsSpace()->used();
2209
2210  // update performance counters - this uses a special version of
2211  // update_counters() that allows the utilization to be passed as a
2212  // parameter, avoiding multiple calls to used().
2213  //
2214  _cmsGen->update_counters(cms_used);
2215
2216  bitMapLock()->unlock();
2217  releaseFreelistLocks();
2218
2219  if (!CleanChunkPoolAsync) {
2220    Chunk::clean_chunk_pool();
2221  }
2222
2223  set_did_compact(false);
2224  _between_prologue_and_epilogue = false;  // ready for next cycle
2225}
2226
2227void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2228  collector()->gc_epilogue(full);
2229
2230  // Also reset promotion tracking in par gc thread states.
2231  for (uint i = 0; i < ParallelGCThreads; i++) {
2232    _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2233  }
2234}
2235
2236void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2237  assert(!incremental_collection_failed(), "Should have been cleared");
2238  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2239  cmsSpace()->gc_epilogue();
2240  // Print stat counters
2241  NOT_PRODUCT(
2242    assert(_numObjectsAllocated == 0, "check");
2243    assert(_numWordsAllocated == 0, "check");
2244    if (Verbose && PrintGC) {
2245      gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2246                          SIZE_FORMAT " bytes",
2247                          _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2248    }
2249    _numObjectsPromoted = 0;
2250    _numWordsPromoted   = 0;
2251  )
2252
2253  if (PrintGC && Verbose) {
2254    // The call down the chain in contiguous_available() needs the freelistLock,
2255    // so print this out before releasing the freelistLock.
2256    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2257                        contiguous_available());
2258  }
2259}
2260
2261#ifndef PRODUCT
2262bool CMSCollector::have_cms_token() {
2263  Thread* thr = Thread::current();
2264  if (thr->is_VM_thread()) {
2265    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2266  } else if (thr->is_ConcurrentGC_thread()) {
2267    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2268  } else if (thr->is_GC_task_thread()) {
2269    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2270           ParGCRareEvent_lock->owned_by_self();
2271  }
2272  return false;
2273}
2274#endif
2275
2276// Check reachability of the given heap address in CMS generation,
2277// treating all other generations as roots.
2278bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2279  // We could "guarantee" below, rather than assert, but I'll
2280  // leave these as "asserts" so that an adventurous debugger
2281  // could try this in the product build provided some subset of
2282  // the conditions were met, and provided they were interested in the
2283  // results and knew that the computation below wouldn't interfere
2284  // with other concurrent computations mutating the structures
2285  // being read or written.
2286  assert(SafepointSynchronize::is_at_safepoint(),
2287         "Else mutations in object graph will make answer suspect");
2288  assert(have_cms_token(), "Should hold cms token");
2289  assert(haveFreelistLocks(), "must hold free list locks");
2290  assert_lock_strong(bitMapLock());
2291
2292  // Clear the marking bit map array before starting, but, just
2293  // for kicks, first report if the given address is already marked
2294  gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2295                _markBitMap.isMarked(addr) ? "" : " not");
2296
2297  if (verify_after_remark()) {
2298    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2299    bool result = verification_mark_bm()->isMarked(addr);
2300    gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2301                           result ? "IS" : "is NOT");
2302    return result;
2303  } else {
2304    gclog_or_tty->print_cr("Could not compute result");
2305    return false;
2306  }
2307}
2308
2309
2310void
2311CMSCollector::print_on_error(outputStream* st) {
2312  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2313  if (collector != NULL) {
2314    CMSBitMap* bitmap = &collector->_markBitMap;
2315    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2316    bitmap->print_on_error(st, " Bits: ");
2317
2318    st->cr();
2319
2320    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2321    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2322    mut_bitmap->print_on_error(st, " Bits: ");
2323  }
2324}
2325
2326////////////////////////////////////////////////////////
2327// CMS Verification Support
2328////////////////////////////////////////////////////////
2329// Following the remark phase, the following invariant
2330// should hold -- each object in the CMS heap which is
2331// marked in the verification_mark_bm() should also be marked in markBitMap().
2332
2333class VerifyMarkedClosure: public BitMapClosure {
2334  CMSBitMap* _marks;
2335  bool       _failed;
2336
2337 public:
2338  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2339
2340  bool do_bit(size_t offset) {
2341    HeapWord* addr = _marks->offsetToHeapWord(offset);
2342    if (!_marks->isMarked(addr)) {
2343      oop(addr)->print_on(gclog_or_tty);
2344      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2345      _failed = true;
2346    }
2347    return true;
2348  }
2349
2350  bool failed() { return _failed; }
2351};
2352
2353bool CMSCollector::verify_after_remark(bool silent) {
2354  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2355  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2356  static bool init = false;
2357
2358  assert(SafepointSynchronize::is_at_safepoint(),
2359         "Else mutations in object graph will make answer suspect");
2360  assert(have_cms_token(),
2361         "Else there may be mutual interference in use of "
2362         "verification data structures");
2363  assert(_collectorState > Marking && _collectorState <= Sweeping,
2364         "Else marking info checked here may be obsolete");
2365  assert(haveFreelistLocks(), "must hold free list locks");
2366  assert_lock_strong(bitMapLock());
2367
2368
2369  // Allocate marking bit map if not already allocated
2370  if (!init) { // first time
2371    if (!verification_mark_bm()->allocate(_span)) {
2372      return false;
2373    }
2374    init = true;
2375  }
2376
2377  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2378
2379  // Turn off refs discovery -- so we will be tracing through refs.
2380  // This is as intended, because by this time
2381  // GC must already have cleared any refs that need to be cleared,
2382  // and traced those that need to be marked; moreover,
2383  // the marking done here is not going to interfere in any
2384  // way with the marking information used by GC.
2385  NoRefDiscovery no_discovery(ref_processor());
2386
2387  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2388
2389  // Clear any marks from a previous round
2390  verification_mark_bm()->clear_all();
2391  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2392  verify_work_stacks_empty();
2393
2394  GenCollectedHeap* gch = GenCollectedHeap::heap();
2395  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2396  // Update the saved marks which may affect the root scans.
2397  gch->save_marks();
2398
2399  if (CMSRemarkVerifyVariant == 1) {
2400    // In this first variant of verification, we complete
2401    // all marking, then check if the new marks-vector is
2402    // a subset of the CMS marks-vector.
2403    verify_after_remark_work_1();
2404  } else if (CMSRemarkVerifyVariant == 2) {
2405    // In this second variant of verification, we flag an error
2406    // (i.e. an object reachable in the new marks-vector not reachable
2407    // in the CMS marks-vector) immediately, also indicating the
2408    // identity of an object (A) that references the unmarked object (B) --
2409    // presumably, a mutation to A failed to be picked up by preclean/remark?
2410    verify_after_remark_work_2();
2411  } else {
2412    warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2413            CMSRemarkVerifyVariant);
2414  }
2415  if (!silent) gclog_or_tty->print(" done] ");
2416  return true;
2417}
2418
2419void CMSCollector::verify_after_remark_work_1() {
2420  ResourceMark rm;
2421  HandleMark  hm;
2422  GenCollectedHeap* gch = GenCollectedHeap::heap();
2423
2424  // Get a clear set of claim bits for the roots processing to work with.
2425  ClassLoaderDataGraph::clear_claimed_marks();
2426
2427  // Mark from roots one level into CMS
2428  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2429  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2430
2431  {
2432    StrongRootsScope srs(1);
2433
2434    gch->gen_process_roots(&srs,
2435                           _cmsGen->level(),
2436                           true,   // younger gens are roots
2437                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2438                           should_unload_classes(),
2439                           &notOlder,
2440                           NULL,
2441                           NULL);
2442  }
2443
2444  // Now mark from the roots
2445  MarkFromRootsClosure markFromRootsClosure(this, _span,
2446    verification_mark_bm(), verification_mark_stack(),
2447    false /* don't yield */, true /* verifying */);
2448  assert(_restart_addr == NULL, "Expected pre-condition");
2449  verification_mark_bm()->iterate(&markFromRootsClosure);
2450  while (_restart_addr != NULL) {
2451    // Deal with stack overflow by restarting at the indicated
2452    // address.
2453    HeapWord* ra = _restart_addr;
2454    markFromRootsClosure.reset(ra);
2455    _restart_addr = NULL;
2456    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2457  }
2458  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2459  verify_work_stacks_empty();
2460
2461  // Marking completed -- now verify that each bit marked in
2462  // verification_mark_bm() is also marked in markBitMap(); flag all
2463  // errors by printing corresponding objects.
2464  VerifyMarkedClosure vcl(markBitMap());
2465  verification_mark_bm()->iterate(&vcl);
2466  if (vcl.failed()) {
2467    gclog_or_tty->print("Verification failed");
2468    gch->print_on(gclog_or_tty);
2469    fatal("CMS: failed marking verification after remark");
2470  }
2471}
2472
2473class VerifyKlassOopsKlassClosure : public KlassClosure {
2474  class VerifyKlassOopsClosure : public OopClosure {
2475    CMSBitMap* _bitmap;
2476   public:
2477    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2478    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2479    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2480  } _oop_closure;
2481 public:
2482  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2483  void do_klass(Klass* k) {
2484    k->oops_do(&_oop_closure);
2485  }
2486};
2487
2488void CMSCollector::verify_after_remark_work_2() {
2489  ResourceMark rm;
2490  HandleMark  hm;
2491  GenCollectedHeap* gch = GenCollectedHeap::heap();
2492
2493  // Get a clear set of claim bits for the roots processing to work with.
2494  ClassLoaderDataGraph::clear_claimed_marks();
2495
2496  // Mark from roots one level into CMS
2497  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2498                                     markBitMap());
2499  CLDToOopClosure cld_closure(&notOlder, true);
2500
2501  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2502
2503  {
2504    StrongRootsScope srs(1);
2505
2506    gch->gen_process_roots(&srs,
2507                           _cmsGen->level(),
2508                           true,   // younger gens are roots
2509                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2510                           should_unload_classes(),
2511                           &notOlder,
2512                           NULL,
2513                           &cld_closure);
2514  }
2515
2516  // Now mark from the roots
2517  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2518    verification_mark_bm(), markBitMap(), verification_mark_stack());
2519  assert(_restart_addr == NULL, "Expected pre-condition");
2520  verification_mark_bm()->iterate(&markFromRootsClosure);
2521  while (_restart_addr != NULL) {
2522    // Deal with stack overflow by restarting at the indicated
2523    // address.
2524    HeapWord* ra = _restart_addr;
2525    markFromRootsClosure.reset(ra);
2526    _restart_addr = NULL;
2527    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2528  }
2529  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2530  verify_work_stacks_empty();
2531
2532  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2533  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2534
2535  // Marking completed -- now verify that each bit marked in
2536  // verification_mark_bm() is also marked in markBitMap(); flag all
2537  // errors by printing corresponding objects.
2538  VerifyMarkedClosure vcl(markBitMap());
2539  verification_mark_bm()->iterate(&vcl);
2540  assert(!vcl.failed(), "Else verification above should not have succeeded");
2541}
2542
2543void ConcurrentMarkSweepGeneration::save_marks() {
2544  // delegate to CMS space
2545  cmsSpace()->save_marks();
2546  for (uint i = 0; i < ParallelGCThreads; i++) {
2547    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2548  }
2549}
2550
2551bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2552  return cmsSpace()->no_allocs_since_save_marks();
2553}
2554
2555#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2556                                                                \
2557void ConcurrentMarkSweepGeneration::                            \
2558oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2559  cl->set_generation(this);                                     \
2560  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2561  cl->reset_generation();                                       \
2562  save_marks();                                                 \
2563}
2564
2565ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2566
2567void
2568ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2569  if (freelistLock()->owned_by_self()) {
2570    Generation::oop_iterate(cl);
2571  } else {
2572    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2573    Generation::oop_iterate(cl);
2574  }
2575}
2576
2577void
2578ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2579  if (freelistLock()->owned_by_self()) {
2580    Generation::object_iterate(cl);
2581  } else {
2582    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2583    Generation::object_iterate(cl);
2584  }
2585}
2586
2587void
2588ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2589  if (freelistLock()->owned_by_self()) {
2590    Generation::safe_object_iterate(cl);
2591  } else {
2592    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2593    Generation::safe_object_iterate(cl);
2594  }
2595}
2596
2597void
2598ConcurrentMarkSweepGeneration::post_compact() {
2599}
2600
2601void
2602ConcurrentMarkSweepGeneration::prepare_for_verify() {
2603  // Fix the linear allocation blocks to look like free blocks.
2604
2605  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2606  // are not called when the heap is verified during universe initialization and
2607  // at vm shutdown.
2608  if (freelistLock()->owned_by_self()) {
2609    cmsSpace()->prepare_for_verify();
2610  } else {
2611    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2612    cmsSpace()->prepare_for_verify();
2613  }
2614}
2615
2616void
2617ConcurrentMarkSweepGeneration::verify() {
2618  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2619  // are not called when the heap is verified during universe initialization and
2620  // at vm shutdown.
2621  if (freelistLock()->owned_by_self()) {
2622    cmsSpace()->verify();
2623  } else {
2624    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2625    cmsSpace()->verify();
2626  }
2627}
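
// The acquire-unless-already-owned locking idiom used in the five methods
// above could, in principle, be factored into a helper. A minimal sketch,
// using a hypothetical template that is not part of this file:
//
//   template <typename Fn>
//   static void with_freelist_lock(Mutex* fll, Fn fn) {
//     if (fll->owned_by_self()) {
//       fn();   // this thread already holds the lock (e.g. via gc_prologue)
//     } else {
//       MutexLockerEx x(fll, Mutex::_no_safepoint_check_flag);
//       fn();   // hold the lock just for the duration of fn
//     }
//   }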
2628
2629void CMSCollector::verify() {
2630  _cmsGen->verify();
2631}
2632
2633#ifndef PRODUCT
2634bool CMSCollector::overflow_list_is_empty() const {
2635  assert(_num_par_pushes >= 0, "Inconsistency");
2636  if (_overflow_list == NULL) {
2637    assert(_num_par_pushes == 0, "Inconsistency");
2638  }
2639  return _overflow_list == NULL;
2640}
2641
2642// The methods verify_work_stacks_empty() and verify_overflow_empty()
2643// merely consolidate assertion checks that appear to occur together frequently.
2644void CMSCollector::verify_work_stacks_empty() const {
2645  assert(_markStack.isEmpty(), "Marking stack should be empty");
2646  assert(overflow_list_is_empty(), "Overflow list should be empty");
2647}
2648
2649void CMSCollector::verify_overflow_empty() const {
2650  assert(overflow_list_is_empty(), "Overflow list should be empty");
2651  assert(no_preserved_marks(), "No preserved marks");
2652}
2653#endif // PRODUCT
2654
2655// Decide if we want to enable class unloading as part of the
2656// ensuing concurrent GC cycle. We will collect and
2657// unload classes if it's the case that:
2658// (1) an explicit gc request has been made and the flag
2659//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2660// (2) (a) class unloading is enabled at the command line, and
2661//     (b) either (i) enough concurrent cycles have elapsed since classes
2662//         were last unloaded (CMSClassUnloadingMaxInterval), or (ii) old
2663//         gen is getting really full.
2664// NOTE: Provided there is no change in the state of the heap between
2665// calls to this method, it should have idempotent results. Moreover,
2666// its results should be monotonically increasing (i.e. going from 0 to 1,
2667// but not 1 to 0) between successive calls between which the heap was
2668// not collected. The implementation below therefore relies on the
2669// properties that concurrent_cycles_since_last_unload() will not decrease
2670// unless a collection cycle happened, and that _cmsGen->is_too_full() is
2671// itself also monotonic in that sense.
2672void CMSCollector::update_should_unload_classes() {
2673  _should_unload_classes = false;
2674  // Condition 1 above
2675  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2676    _should_unload_classes = true;
2677  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2678    // Disjuncts 2.b.(i,ii) above
2679    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2680                              CMSClassUnloadingMaxInterval)
2681                           || _cmsGen->is_too_full();
2682  }
2683}
2684
2685bool ConcurrentMarkSweepGeneration::is_too_full() const {
2686  bool res = should_concurrent_collect();
2687  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2688  return res;
2689}
2690
2691void CMSCollector::setup_cms_unloading_and_verification_state() {
2692  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2693                             || VerifyBeforeExit;
2694  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2695
2696  // We set the proper root for this CMS cycle here.
2697  if (should_unload_classes()) {   // Should unload classes this cycle
2698    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2699    set_verifying(should_verify);    // Set verification state for this cycle
2700    return;                            // Nothing else needs to be done at this time
2701  }
2702
2703  // Not unloading classes this cycle
2704  assert(!should_unload_classes(), "Inconsistency!");
2705
2706  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2707    // Include symbols, strings and code cache elements to prevent their resurrection.
2708    add_root_scanning_option(rso);
2709    set_verifying(true);
2710  } else if (verifying() && !should_verify) {
2711    // We were verifying, but some verification flags got disabled.
2712    set_verifying(false);
2713    // Exclude symbols, strings and code cache elements from root scanning to
2714  // reduce the initial mark and remark pauses.
2715    remove_root_scanning_option(rso);
2716  }
2717}
2718
2719
2720#ifndef PRODUCT
2721HeapWord* CMSCollector::block_start(const void* p) const {
2722  const HeapWord* addr = (HeapWord*)p;
2723  if (_span.contains(p)) {
2724    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2725      return _cmsGen->cmsSpace()->block_start(p);
2726    }
2727  }
2728  return NULL;
2729}
2730#endif
2731
2732HeapWord*
2733ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2734                                                   bool   tlab,
2735                                                   bool   parallel) {
2736  CMSSynchronousYieldRequest yr;
2737  assert(!tlab, "Can't deal with TLAB allocation");
2738  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2739  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2740  if (GCExpandToAllocateDelayMillis > 0) {
2741    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2742  }
2743  return have_lock_and_allocate(word_size, tlab);
2744}
2745
2746void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2747    size_t bytes,
2748    size_t expand_bytes,
2749    CMSExpansionCause::Cause cause)
2750{
2751
2752  bool success = expand(bytes, expand_bytes);
2753
2754  // remember why we expanded; this information is used
2755  // by shouldConcurrentCollect() when making decisions on whether to start
2756  // a new CMS cycle.
2757  if (success) {
2758    set_expansion_cause(cause);
2759    if (PrintGCDetails && Verbose) {
2760      gclog_or_tty->print_cr("Expanded CMS gen for %s",
2761        CMSExpansionCause::to_string(cause));
2762    }
2763  }
2764}
2765
2766HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2767  HeapWord* res = NULL;
2768  MutexLocker x(ParGCRareEvent_lock);
2769  while (true) {
2770    // Expansion by some other thread might make alloc OK now:
2771    res = ps->lab.alloc(word_sz);
2772    if (res != NULL) return res;
2773    // If there's not enough expansion space available, give up.
2774    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2775      return NULL;
2776    }
2777    // Otherwise, we try expansion.
2778    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2779    // Now go around the loop and try alloc again;
2780    // A competing par_promote might beat us to the expansion space,
2781    // so we may go around the loop again if promotion fails again.
2782    if (GCExpandToAllocateDelayMillis > 0) {
2783      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2784    }
2785  }
2786}
2787
2788
2789bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2790  PromotionInfo* promo) {
2791  MutexLocker x(ParGCRareEvent_lock);
2792  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2793  while (true) {
2794    // Expansion by some other thread might make alloc OK now:
2795    if (promo->ensure_spooling_space()) {
2796      assert(promo->has_spooling_space(),
2797             "Post-condition of successful ensure_spooling_space()");
2798      return true;
2799    }
2800    // If there's not enough expansion space available, give up.
2801    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2802      return false;
2803    }
2804    // Otherwise, we try expansion.
2805    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2806    // Now go around the loop and try alloc again;
2807    // A competing allocation might beat us to the expansion space,
2808    // so we may go around the loop again if allocation fails again.
2809    if (GCExpandToAllocateDelayMillis > 0) {
2810      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2811    }
2812  }
2813}
2814
2815void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2816  // Only shrink if a compaction was done so that all the free space
2817  // in the generation is in a contiguous block at the end.
2818  if (did_compact()) {
2819    CardGeneration::shrink(bytes);
2820  }
2821}
2822
2823void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2824  assert_locked_or_safepoint(Heap_lock);
2825}
2826
2827void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2828  assert_locked_or_safepoint(Heap_lock);
2829  assert_lock_strong(freelistLock());
2830  if (PrintGCDetails && Verbose) {
2831    warning("Shrinking of CMS not yet implemented");
2832  }
2833  return;
2834}
2835
2836
2837// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2838// phases.
2839class CMSPhaseAccounting: public StackObj {
2840 public:
2841  CMSPhaseAccounting(CMSCollector *collector,
2842                     const char *phase,
2843                     const GCId gc_id,
2844                     bool print_cr = true);
2845  ~CMSPhaseAccounting();
2846
2847 private:
2848  CMSCollector *_collector;
2849  const char *_phase;
2850  elapsedTimer _wallclock;
2851  bool _print_cr;
2852  const GCId _gc_id;
2853
2854 public:
2855  // Not MT-safe; do not pass around these StackObj's
2856  // where they may be accessed by other threads.
2857  jlong wallclock_millis() {
2858    assert(_wallclock.is_active(), "Wall clock should not stop");
2859    _wallclock.stop();  // to record time
2860    jlong ret = _wallclock.milliseconds();
2861    _wallclock.start(); // restart
2862    return ret;
2863  }
2864};
2865
2866CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2867                                       const char *phase,
2868                                       const GCId gc_id,
2869                                       bool print_cr) :
2870  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2871
2872  if (PrintCMSStatistics != 0) {
2873    _collector->resetYields();
2874  }
2875  if (PrintGCDetails) {
2876    gclog_or_tty->gclog_stamp(_gc_id);
2877    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2878      _collector->cmsGen()->short_name(), _phase);
2879  }
2880  _collector->resetTimer();
2881  _wallclock.start();
2882  _collector->startTimer();
2883}
2884
2885CMSPhaseAccounting::~CMSPhaseAccounting() {
2886  assert(_wallclock.is_active(), "Wall clock should not have stopped");
2887  _collector->stopTimer();
2888  _wallclock.stop();
2889  if (PrintGCDetails) {
2890    gclog_or_tty->gclog_stamp(_gc_id);
2891    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2892                 _collector->cmsGen()->short_name(),
2893                 _phase, _collector->timerValue(), _wallclock.seconds());
2894    if (_print_cr) {
2895      gclog_or_tty->cr();
2896    }
2897    if (PrintCMSStatistics != 0) {
2898      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2899                    _collector->yields());
2900    }
2901  }
2902}
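
// CMSPhaseAccounting is thus used as a scoped (RAII) object: construction
// logs the "[<gen>-concurrent-<phase>-start]" line and starts the timers,
// and destruction logs the elapsed CPU and wall-clock times. A typical use,
// taken from markFromRoots() further below:
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
//   bool res = markFromRootsWork();  // accounting closes when pa goes out of scope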
2903
2904// CMS work
2905
2906// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2907class CMSParMarkTask : public AbstractGangTask {
2908 protected:
2909  CMSCollector*     _collector;
2910  uint              _n_workers;
2911  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2912      AbstractGangTask(name),
2913      _collector(collector),
2914      _n_workers(n_workers) {}
2915  // Work method in support of parallel rescan ... of young gen spaces
2916  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2917                             ContiguousSpace* space,
2918                             HeapWord** chunk_array, size_t chunk_top);
2919  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2920};
2921
2922// Parallel initial mark task
2923class CMSParInitialMarkTask: public CMSParMarkTask {
2924  StrongRootsScope* _strong_roots_scope;
2925 public:
2926  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2927      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2928      _strong_roots_scope(strong_roots_scope) {}
2929  void work(uint worker_id);
2930};
2931
2932// Checkpoint the roots into this generation from outside
2933// this generation. [Note this initial checkpoint need only
2934// be approximate -- we'll do a catch up phase subsequently.]
2935void CMSCollector::checkpointRootsInitial() {
2936  assert(_collectorState == InitialMarking, "Wrong collector state");
2937  check_correct_thread_executing();
2938  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2939
2940  save_heap_summary();
2941  report_heap_summary(GCWhen::BeforeGC);
2942
2943  ReferenceProcessor* rp = ref_processor();
2944  assert(_restart_addr == NULL, "Control point invariant");
2945  {
2946    // acquire locks for subsequent manipulations
2947    MutexLockerEx x(bitMapLock(),
2948                    Mutex::_no_safepoint_check_flag);
2949    checkpointRootsInitialWork();
2950    // enable ("weak") refs discovery
2951    rp->enable_discovery();
2952    _collectorState = Marking;
2953  }
2954}
2955
2956void CMSCollector::checkpointRootsInitialWork() {
2957  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2958  assert(_collectorState == InitialMarking, "just checking");
2959
2960  // If there has not been a GC[n-1] since last GC[n] cycle completed,
2961  // precede our marking with a collection of all
2962  // younger generations to keep floating garbage to a minimum.
2963  // XXX: we won't do this for now -- it's an optimization to be done later.
2964
2965  // already have locks
2966  assert_lock_strong(bitMapLock());
2967  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2968
2969  // Setup the verification and class unloading state for this
2970  // CMS collection cycle.
2971  setup_cms_unloading_and_verification_state();
2972
2973  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2974    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
2975
2976  // Reset all the PLAB chunk arrays if necessary.
2977  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2978    reset_survivor_plab_arrays();
2979  }
2980
2981  ResourceMark rm;
2982  HandleMark  hm;
2983
2984  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2985  GenCollectedHeap* gch = GenCollectedHeap::heap();
2986
2987  verify_work_stacks_empty();
2988  verify_overflow_empty();
2989
2990  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2991  // Update the saved marks which may affect the root scans.
2992  gch->save_marks();
2993
2994  // weak reference processing has not started yet.
2995  ref_processor()->set_enqueuing_is_done(false);
2996
2997  // Need to remember all newly created CLDs,
2998  // so that we can guarantee that the remark finds them.
2999  ClassLoaderDataGraph::remember_new_clds(true);
3000
3001  // Whenever a CLD is found, it will be claimed before proceeding to mark
3002  // the klasses. The claimed marks need to be cleared before marking starts.
3003  ClassLoaderDataGraph::clear_claimed_marks();
3004
3005  if (CMSPrintEdenSurvivorChunks) {
3006    print_eden_and_survivor_chunk_arrays();
3007  }
3008
3009  {
3010    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3011    if (CMSParallelInitialMarkEnabled) {
3012      // The parallel version.
3013      FlexibleWorkGang* workers = gch->workers();
3014      assert(workers != NULL, "Need parallel worker threads.");
3015      uint n_workers = workers->active_workers();
3016
3017      StrongRootsScope srs(n_workers);
3018
3019      CMSParInitialMarkTask tsk(this, &srs, n_workers);
3020      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3021      if (n_workers > 1) {
3022        workers->run_task(&tsk);
3023      } else {
3024        tsk.work(0);
3025      }
3026    } else {
3027      // The serial version.
3028      CLDToOopClosure cld_closure(&notOlder, true);
3029      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3030
3031      StrongRootsScope srs(1);
3032
3033      gch->gen_process_roots(&srs,
3034                             _cmsGen->level(),
3035                             true,   // younger gens are roots
3036                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
3037                             should_unload_classes(),
3038                             &notOlder,
3039                             NULL,
3040                             &cld_closure);
3041    }
3042  }
3043
3044  // The mod-union table should be clear at this point; it will be dirtied
3045  // in the CMS generation's prologue on each younger generation collection.
3046
3047  assert(_modUnionTable.isAllClear(),
3048       "Was cleared in most recent final checkpoint phase"
3049       " or no bits are set in the gc_prologue before the start of the next "
3050       "subsequent marking phase.");
3051
3052  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3053
3054  // Save the end of the used_region of the constituent generations
3055  // to be used to limit the extent of sweep in each generation.
3056  save_sweep_limits();
3057  verify_overflow_empty();
3058}
3059
3060bool CMSCollector::markFromRoots() {
3061  // we might be tempted to assert that:
3062  // assert(!SafepointSynchronize::is_at_safepoint(),
3063  //        "inconsistent argument?");
3064// However, that wouldn't be right, because it's possible that
3065  // a safepoint is indeed in progress as a younger generation
3066  // stop-the-world GC happens even as we mark in this generation.
3067  assert(_collectorState == Marking, "inconsistent state?");
3068  check_correct_thread_executing();
3069  verify_overflow_empty();
3070
3071  // Weak ref discovery note: We may be discovering weak
3072  // refs in this generation concurrently (but interleaved) with
3073  // weak ref discovery by a younger generation collector.
3074
3075  CMSTokenSyncWithLocks ts(true, bitMapLock());
3076  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3077  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3078  bool res = markFromRootsWork();
3079  if (res) {
3080    _collectorState = Precleaning;
3081  } else { // We failed and a foreground collection wants to take over
3082    assert(_foregroundGCIsActive, "internal state inconsistency");
3083    assert(_restart_addr == NULL,  "foreground will restart from scratch");
3084    if (PrintGCDetails) {
3085      gclog_or_tty->print_cr("bailing out to foreground collection");
3086    }
3087  }
3088  verify_overflow_empty();
3089  return res;
3090}
3091
3092bool CMSCollector::markFromRootsWork() {
3093  // iterate over marked bits in bit map, doing a full scan and mark
3094  // from these roots using the following algorithm:
3095  // . if oop is to the right of the current scan pointer,
3096  //   mark corresponding bit (we'll process it later)
3097  // . else (oop is to left of current scan pointer)
3098  //   push oop on marking stack
3099  // . drain the marking stack
3100
3101  // Note that when we do a marking step we need to hold the
3102  // bit map lock -- recall that direct allocation (by mutators)
3103  // and promotion (by younger generation collectors) is also
3104  // marking the bit map. [the so-called allocate live policy.]
3105  // Because the implementation of bit map marking is not
3106  // robust wrt simultaneous marking of bits in the same word,
3107  // we need to make sure that there is no such interference
3108  // between such concurrent updates.
3109
3110  // already have locks
3111  assert_lock_strong(bitMapLock());
3112
3113  verify_work_stacks_empty();
3114  verify_overflow_empty();
3115  bool result = false;
3116  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3117    result = do_marking_mt();
3118  } else {
3119    result = do_marking_st();
3120  }
3121  return result;
3122}
3123
3124// Forward decl
3125class CMSConcMarkingTask;
3126
3127class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3128  CMSCollector*       _collector;
3129  CMSConcMarkingTask* _task;
3130 public:
3131  virtual void yield();
3132
3133  // "n_threads" is the number of threads to be terminated.
3134  // "queue_set" is a set of work queues of other threads.
3135  // "collector" is the CMS collector associated with this task terminator.
3136  // "yield" indicates whether we need the gang as a whole to yield.
3137  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3138    ParallelTaskTerminator(n_threads, queue_set),
3139    _collector(collector) { }
3140
3141  void set_task(CMSConcMarkingTask* task) {
3142    _task = task;
3143  }
3144};
3145
3146class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3147  CMSConcMarkingTask* _task;
3148 public:
3149  bool should_exit_termination();
3150  void set_task(CMSConcMarkingTask* task) {
3151    _task = task;
3152  }
3153};
3154
3155// MT Concurrent Marking Task
3156class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3157  CMSCollector* _collector;
3158  uint          _n_workers;       // requested/desired # workers
3159  bool          _result;
3160  CompactibleFreeListSpace*  _cms_space;
3161  char          _pad_front[64];   // padding to ...
3162  HeapWord*     _global_finger;   // ... avoid sharing cache line
3163  char          _pad_back[64];
3164  HeapWord*     _restart_addr;
3165
3166  //  Exposed here for yielding support
3167  Mutex* const _bit_map_lock;
3168
3169  // The per thread work queues, available here for stealing
3170  OopTaskQueueSet*  _task_queues;
3171
3172  // Termination (and yielding) support
3173  CMSConcMarkingTerminator _term;
3174  CMSConcMarkingTerminatorTerminator _term_term;
3175
3176 public:
3177  CMSConcMarkingTask(CMSCollector* collector,
3178                 CompactibleFreeListSpace* cms_space,
3179                 YieldingFlexibleWorkGang* workers,
3180                 OopTaskQueueSet* task_queues):
3181    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3182    _collector(collector),
3183    _cms_space(cms_space),
3184    _n_workers(0), _result(true),
3185    _task_queues(task_queues),
3186    _term(_n_workers, task_queues, _collector),
3187    _bit_map_lock(collector->bitMapLock())
3188  {
3189    _requested_size = _n_workers;
3190    _term.set_task(this);
3191    _term_term.set_task(this);
3192    _restart_addr = _global_finger = _cms_space->bottom();
3193  }
3194
3195
3196  OopTaskQueueSet* task_queues()  { return _task_queues; }
3197
3198  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3199
3200  HeapWord** global_finger_addr() { return &_global_finger; }
3201
3202  CMSConcMarkingTerminator* terminator() { return &_term; }
3203
3204  virtual void set_for_termination(uint active_workers) {
3205    terminator()->reset_for_reuse(active_workers);
3206  }
3207
3208  void work(uint worker_id);
3209  bool should_yield() {
3210    return    ConcurrentMarkSweepThread::should_yield()
3211           && !_collector->foregroundGCIsActive();
3212  }
3213
3214  virtual void coordinator_yield();  // stuff done by coordinator
3215  bool result() { return _result; }
3216
3217  void reset(HeapWord* ra) {
3218    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3219    _restart_addr = _global_finger = ra;
3220    _term.reset_for_reuse();
3221  }
3222
3223  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3224                                           OopTaskQueue* work_q);
3225
3226 private:
3227  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3228  void do_work_steal(int i);
3229  void bump_global_finger(HeapWord* f);
3230};
3231
3232bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3233  assert(_task != NULL, "Error");
3234  return _task->yielding();
3235  // Note that we do not need the disjunct || _task->should_yield() above
3236  // because we want terminating threads to yield only if the task
3237  // is already in the midst of yielding, which happens only after at least one
3238  // thread has yielded.
3239}
3240
3241void CMSConcMarkingTerminator::yield() {
3242  if (_task->should_yield()) {
3243    _task->yield();
3244  } else {
3245    ParallelTaskTerminator::yield();
3246  }
3247}
3248
3249////////////////////////////////////////////////////////////////
3250// Concurrent Marking Algorithm Sketch
3251////////////////////////////////////////////////////////////////
3252// Until all tasks exhausted (both spaces):
3253// -- claim next available chunk
3254// -- bump global finger via CAS
3255// -- find first object that starts in this chunk
3256//    and start scanning bitmap from that position
3257// -- scan marked objects for oops
3258// -- CAS-mark target, and if successful:
3259//    . if target oop is above global finger (volatile read)
3260//      nothing to do
3261//    . if target oop is in chunk and above local finger
3262//        then nothing to do
3263//    . else push on work-queue
3264// -- Deal with possible overflow issues:
3265//    . local work-queue overflow causes stuff to be pushed on
3266//      global (common) overflow queue
3267//    . always first empty local work queue
3268//    . then get a batch of oops from global work queue if any
3269//    . then do work stealing
3270// -- When all tasks claimed (both spaces)
3271//    and local work queue empty,
3272//    then in a loop do:
3273//    . check global overflow stack; steal a batch of oops and trace
3274//    . try to steal from other threads if GOS is empty
3275//    . if neither is available, offer termination
3276// -- Terminate and return result
3277//
3278void CMSConcMarkingTask::work(uint worker_id) {
3279  elapsedTimer _timer;
3280  ResourceMark rm;
3281  HandleMark hm;
3282
3283  DEBUG_ONLY(_collector->verify_overflow_empty();)
3284
3285  // Before we begin work, our work queue should be empty
3286  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3287  // Scan the bitmap covering _cms_space, tracing through grey objects.
3288  _timer.start();
3289  do_scan_and_mark(worker_id, _cms_space);
3290  _timer.stop();
3291  if (PrintCMSStatistics != 0) {
3292    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3293      worker_id, _timer.seconds());
3294      // XXX: need xxx/xxx type of notation, two timers
3295  }
3296
3297  // ... do work stealing
3298  _timer.reset();
3299  _timer.start();
3300  do_work_steal(worker_id);
3301  _timer.stop();
3302  if (PrintCMSStatistics != 0) {
3303    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3304      worker_id, _timer.seconds());
3305      // XXX: need xxx/xxx type of notation, two timers
3306  }
3307  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3308  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3309  // Note that under the current task protocol, the
3310  // following assertion is true even if the spaces
3311  // have expanded since the completion of the concurrent
3312  // marking. XXX This will likely change under a strict
3313  // ABORT semantics.
3314  // After perm removal the comparison was changed to
3315  // greater than or equal to from strictly greater than.
3316  // Before perm removal the highest address sweep would
3317  // have been at the end of perm gen but now is at the
3318  // end of the tenured gen.
3319  assert(_global_finger >=  _cms_space->end(),
3320         "All tasks have been completed");
3321  DEBUG_ONLY(_collector->verify_overflow_empty();)
3322}
3323
3324void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3325  HeapWord* read = _global_finger;
3326  HeapWord* cur  = read;
3327  while (f > read) {
3328    cur = read;
3329    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3330    if (cur == read) {
3331      // our cas succeeded
3332      assert(_global_finger >= f, "protocol consistency");
3333      break;
3334    }
3335  }
3336}
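
// bump_global_finger() is the standard lock-free "advance to maximum" idiom:
// retry the CAS until we either install f or observe a finger already >= f.
// A minimal free-standing sketch of the same pattern, written with C++11
// atomics instead of HotSpot's Atomic::cmpxchg_ptr, for illustration only:
//
//   #include <atomic>
//   #include <cstdint>
//   void bump_to_max(std::atomic<uintptr_t>& finger, uintptr_t f) {
//     uintptr_t read = finger.load();
//     while (f > read) {
//       // On failure, compare_exchange_weak refreshes 'read' with the value
//       // currently stored, so the loop re-tests f > read before retrying.
//       if (finger.compare_exchange_weak(read, f)) break;
//     }
//   }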
3337
3338// This is really inefficient, and should be redone by
3339// using (not yet available) block-read and -write interfaces to the
3340// stack and the work_queue. XXX FIX ME !!!
3341bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3342                                                      OopTaskQueue* work_q) {
3343  // Fast lock-free check
3344  if (ovflw_stk->length() == 0) {
3345    return false;
3346  }
3347  assert(work_q->size() == 0, "Shouldn't steal");
3348  MutexLockerEx ml(ovflw_stk->par_lock(),
3349                   Mutex::_no_safepoint_check_flag);
3350  // Grab up to 1/4 the size of the work queue
3351  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3352                    (size_t)ParGCDesiredObjsFromOverflowList);
3353  num = MIN2(num, ovflw_stk->length());
3354  for (int i = (int) num; i > 0; i--) {
3355    oop cur = ovflw_stk->pop();
3356    assert(cur != NULL, "Counted wrong?");
3357    work_q->push(cur);
3358  }
3359  return num > 0;
3360}
3361
3362void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3363  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3364  int n_tasks = pst->n_tasks();
3365  // We allow that there may be no tasks to do here because
3366  // we are restarting after a stack overflow.
3367  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3368  uint nth_task = 0;
3369
3370  HeapWord* aligned_start = sp->bottom();
3371  if (sp->used_region().contains(_restart_addr)) {
3372    // Align down to a card boundary for the start of 0th task
3373    // for this space.
3374    aligned_start =
3375      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3376                                 CardTableModRefBS::card_size);
3377  }
3378
3379  size_t chunk_size = sp->marking_task_size();
3380  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3381    // Having claimed the nth task in this space,
3382    // compute the chunk that it corresponds to:
3383    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3384                               aligned_start + (nth_task+1)*chunk_size);
3385    // Try and bump the global finger via a CAS;
3386    // note that we need to do the global finger bump
3387    // _before_ taking the intersection below, because
3388    // the task corresponding to that region will be
3389    // deemed done even if the used_region() expands
3390    // because of allocation -- as it almost certainly will
3391    // during start-up while the threads yield in the
3392    // closure below.
3393    HeapWord* finger = span.end();
3394    bump_global_finger(finger);   // atomically
3395    // There are null tasks here corresponding to chunks
3396    // beyond the "top" address of the space.
3397    span = span.intersection(sp->used_region());
3398    if (!span.is_empty()) {  // Non-null task
3399      HeapWord* prev_obj;
3400      assert(!span.contains(_restart_addr) || nth_task == 0,
3401             "Inconsistency");
3402      if (nth_task == 0) {
3403        // For the 0th task, we'll not need to compute a block_start.
3404        if (span.contains(_restart_addr)) {
3405          // In the case of a restart because of stack overflow,
3406          // we might additionally skip a chunk prefix.
3407          prev_obj = _restart_addr;
3408        } else {
3409          prev_obj = span.start();
3410        }
3411      } else {
3412        // We want to skip the first object because
3413        // the protocol is to scan any object in its entirety
3414        // that _starts_ in this span; a fortiori, any
3415        // object starting in an earlier span is scanned
3416        // as part of an earlier claimed task.
3417        // Below we use the "careful" version of block_start
3418        // so we do not try to navigate uninitialized objects.
3419        prev_obj = sp->block_start_careful(span.start());
3420        // Below we use a variant of block_size that uses the
3421        // Printezis bits to avoid waiting for allocated
3422        // objects to become initialized/parsable.
3423        while (prev_obj < span.start()) {
3424          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3425          if (sz > 0) {
3426            prev_obj += sz;
3427          } else {
3428            // In this case we may end up doing a bit of redundant
3429            // scanning, but that appears unavoidable, short of
3430            // locking the free list locks; see bug 6324141.
3431            break;
3432          }
3433        }
3434      }
3435      if (prev_obj < span.end()) {
3436        MemRegion my_span = MemRegion(prev_obj, span.end());
3437        // Do the marking work within a non-empty span --
3438        // the last argument to the constructor indicates whether the
3439        // iteration should be incremental with periodic yields.
3440        Par_MarkFromRootsClosure cl(this, _collector, my_span,
3441                                    &_collector->_markBitMap,
3442                                    work_queue(i),
3443                                    &_collector->_markStack);
3444        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3445      } // else nothing to do for this task
3446    }   // else nothing to do for this task
3447  }
3448  // We'd be tempted to assert here that since there are no
3449  // more tasks left to claim in this space, the global_finger
3450  // must exceed space->top() and a fortiori space->end(). However,
3451  // that would not quite be correct because the bumping of
3452  // global_finger occurs strictly after the claiming of a task,
3453  // so by the time we reach here the global finger may not yet
3454  // have been bumped up by the thread that claimed the last
3455  // task.
3456  pst->all_tasks_completed();
3457}
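
// Concretely: with aligned_start A and marking_task_size() C (in words), the
// nth claimed task covers [A + n*C, A + (n+1)*C), clipped to used_region().
// For a hypothetical C of 4096 words, task 0 scans [A, A+4096) and task 1
// scans [A+4096, A+8192), each thread first locating (via block_start_careful)
// the first object that starts within its chunk.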
3458
3459class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3460 private:
3461  CMSCollector* _collector;
3462  CMSConcMarkingTask* _task;
3463  MemRegion     _span;
3464  CMSBitMap*    _bit_map;
3465  CMSMarkStack* _overflow_stack;
3466  OopTaskQueue* _work_queue;
3467 protected:
3468  DO_OOP_WORK_DEFN
3469 public:
3470  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3471                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3472    MetadataAwareOopClosure(collector->ref_processor()),
3473    _collector(collector),
3474    _task(task),
3475    _span(collector->_span),
3476    _work_queue(work_queue),
3477    _bit_map(bit_map),
3478    _overflow_stack(overflow_stack)
3479  { }
3480  virtual void do_oop(oop* p);
3481  virtual void do_oop(narrowOop* p);
3482
3483  void trim_queue(size_t max);
3484  void handle_stack_overflow(HeapWord* lost);
3485  void do_yield_check() {
3486    if (_task->should_yield()) {
3487      _task->yield();
3488    }
3489  }
3490};
3491
3492// Grey object scanning during work stealing phase --
3493// the salient assumption here is that any references
3494// that are in these stolen objects being scanned must
3495// already have been initialized (else they would not have
3496// been published), so we do not need to check for
3497// uninitialized objects before pushing here.
3498void Par_ConcMarkingClosure::do_oop(oop obj) {
3499  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
3500  HeapWord* addr = (HeapWord*)obj;
3501  // Check if oop points into the CMS generation
3502  // and is not marked
3503  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3504    // a white object ...
3505    // If we manage to "claim" the object, by being the
3506    // first thread to mark it, then we push it on our
3507    // marking stack
3508    if (_bit_map->par_mark(addr)) {     // ... now grey
3509      // push on work queue (grey set)
3510      bool simulate_overflow = false;
3511      NOT_PRODUCT(
3512        if (CMSMarkStackOverflowALot &&
3513            _collector->simulate_overflow()) {
3514          // simulate a stack overflow
3515          simulate_overflow = true;
3516        }
3517      )
3518      if (simulate_overflow ||
3519          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3520        // stack overflow
3521        if (PrintCMSStatistics != 0) {
3522          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3523                                 SIZE_FORMAT, _overflow_stack->capacity());
3524        }
3525        // We cannot assert that the overflow stack is full because
3526        // it may have been emptied since.
3527        assert(simulate_overflow ||
3528               _work_queue->size() == _work_queue->max_elems(),
3529              "Else push should have succeeded");
3530        handle_stack_overflow(addr);
3531      }
3532    } // Else, some other thread got there first
3533    do_yield_check();
3534  }
3535}
3536
3537void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3538void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3539
3540void Par_ConcMarkingClosure::trim_queue(size_t max) {
3541  while (_work_queue->size() > max) {
3542    oop new_oop;
3543    if (_work_queue->pop_local(new_oop)) {
3544      assert(new_oop->is_oop(), "Should be an oop");
3545      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3546      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3547      new_oop->oop_iterate(this);  // do_oop() above
3548      do_yield_check();
3549    }
3550  }
3551}
3552
3553// Upon stack overflow, we discard (part of) the stack,
3554// remembering the least address amongst those discarded
3555// in CMSCollector's _restart_address.
3556void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3557  // We need to do this under a mutex to prevent other
3558  // workers from interfering with the work done below.
3559  MutexLockerEx ml(_overflow_stack->par_lock(),
3560                   Mutex::_no_safepoint_check_flag);
3561  // Remember the least grey address discarded
3562  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3563  _collector->lower_restart_addr(ra);
3564  _overflow_stack->reset();  // discard stack contents
3565  _overflow_stack->expand(); // expand the stack if possible
3566}
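
// The recovery half of this protocol lives in do_marking_mt() and
// do_marking_st() below: once the marking iteration returns, a non-NULL
// _restart_addr signals that an overflow occurred and marking is re-run
// from that address, along the lines of:
//
//   while (_restart_addr != NULL) {
//     HeapWord* ra = _restart_addr;
//     markFromRootsClosure.reset(ra);
//     _restart_addr = NULL;
//     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
//   }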
3567
3568
3569void CMSConcMarkingTask::do_work_steal(int i) {
3570  OopTaskQueue* work_q = work_queue(i);
3571  oop obj_to_scan;
3572  CMSBitMap* bm = &(_collector->_markBitMap);
3573  CMSMarkStack* ovflw = &(_collector->_markStack);
3574  int* seed = _collector->hash_seed(i);
3575  Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3576  while (true) {
3577    cl.trim_queue(0);
3578    assert(work_q->size() == 0, "Should have been emptied above");
3579    if (get_work_from_overflow_stack(ovflw, work_q)) {
3580      // Can't assert below because the work obtained from the
3581      // overflow stack may already have been stolen from us.
3582      // assert(work_q->size() > 0, "Work from overflow stack");
3583      continue;
3584    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3585      assert(obj_to_scan->is_oop(), "Should be an oop");
3586      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3587      obj_to_scan->oop_iterate(&cl);
3588    } else if (terminator()->offer_termination(&_term_term)) {
3589      assert(work_q->size() == 0, "Impossible!");
3590      break;
3591    } else if (yielding() || should_yield()) {
3592      yield();
3593    }
3594  }
3595}
3596
3597// This is run by the CMS (coordinator) thread.
3598void CMSConcMarkingTask::coordinator_yield() {
3599  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3600         "CMS thread should hold CMS token");
3601  // First give up the locks, then yield, then re-lock
3602  // We should probably use a constructor/destructor idiom to
3603  // do this unlock/lock or modify the MutexUnlocker class to
3604  // serve our purpose. XXX
3605  assert_lock_strong(_bit_map_lock);
3606  _bit_map_lock->unlock();
3607  ConcurrentMarkSweepThread::desynchronize(true);
3608  _collector->stopTimer();
3609  if (PrintCMSStatistics != 0) {
3610    _collector->incrementYields();
3611  }
3612
3613  // It is possible for whichever thread initiated the yield request
3614  // not to get a chance to wake up and take the bitmap lock between
3615  // this thread releasing it and reacquiring it. So, while the
3616  // should_yield() flag is on, let's sleep for a bit to give the
3617  // other thread a chance to wake up. The limit imposed on the number
3618  // of iterations is defensive, to avoid any unforeseen circumstances
3619  // putting us into an infinite loop. Since it's always been this
3620  // (coordinator_yield()) method that was observed to cause the
3621  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3622  // which is by default non-zero. For the other seven methods that
3623  // also perform the yield operation, we are using a different
3624  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3625  // can enable the sleeping for those methods too, if necessary.
3626  // See 6442774.
3627  //
3628  // We really need to reconsider the synchronization between the GC
3629  // thread and the yield-requesting threads in the future and we
3630  // should really use wait/notify, which is the recommended
3631  // way of doing this type of interaction. Additionally, we should
3632  // consolidate the eight methods that do the yield operation and they
3633  // are almost identical into one for better maintainability and
3634  // readability. See 6445193.
3635  //
3636  // Tony 2006.06.29
3637  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3638                   ConcurrentMarkSweepThread::should_yield() &&
3639                   !CMSCollector::foregroundGCIsActive(); ++i) {
3640    os::sleep(Thread::current(), 1, false);
3641  }
3642
3643  ConcurrentMarkSweepThread::synchronize(true);
3644  _bit_map_lock->lock_without_safepoint_check();
3645  _collector->startTimer();
3646}
3647
3648bool CMSCollector::do_marking_mt() {
3649  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3650  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3651                                                                  conc_workers()->active_workers(),
3652                                                                  Threads::number_of_non_daemon_threads());
3653  conc_workers()->set_active_workers(num_workers);
3654
3655  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3656
3657  CMSConcMarkingTask tsk(this,
3658                         cms_space,
3659                         conc_workers(),
3660                         task_queues());
3661
3662  // Since the actual number of workers we get may be different
3663  // from the number we requested above, do we need to do anything different
3664  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3665  // class?? XXX
3666  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3667
3668  // Refs discovery is already non-atomic.
3669  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3670  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3671  conc_workers()->start_task(&tsk);
3672  while (tsk.yielded()) {
3673    tsk.coordinator_yield();
3674    conc_workers()->continue_task(&tsk);
3675  }
3676  // If the task was aborted, _restart_addr will be non-NULL
3677  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3678  while (_restart_addr != NULL) {
3679    // XXX For now we do not make use of ABORTED state and have not
3680    // yet implemented the right abort semantics (even in the original
3681    // single-threaded CMS case). That needs some more investigation
3682    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3683    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3684    // If _restart_addr is non-NULL, a marking stack overflow
3685    // occurred; we need to do a fresh marking iteration from the
3686    // indicated restart address.
3687    if (_foregroundGCIsActive) {
3688      // We may be running into repeated stack overflows, having
3689      // reached the limit of the stack size, while making very
3690      // slow forward progress. It may be best to bail out and
3691      // let the foreground collector do its job.
3692      // Clear _restart_addr, so that foreground GC
3693      // works from scratch. This avoids the headache of
3694      // a "rescan" which would otherwise be needed because
3695      // of the dirty mod union table & card table.
3696      _restart_addr = NULL;
3697      return false;
3698    }
3699    // Adjust the task to restart from _restart_addr
3700    tsk.reset(_restart_addr);
3701    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3702                  _restart_addr);
3703    _restart_addr = NULL;
3704    // Get the workers going again
3705    conc_workers()->start_task(&tsk);
3706    while (tsk.yielded()) {
3707      tsk.coordinator_yield();
3708      conc_workers()->continue_task(&tsk);
3709    }
3710  }
3711  assert(tsk.completed(), "Inconsistency");
3712  assert(tsk.result() == true, "Inconsistency");
3713  return true;
3714}
3715
3716bool CMSCollector::do_marking_st() {
3717  ResourceMark rm;
3718  HandleMark   hm;
3719
3720  // Temporarily make refs discovery single threaded (non-MT)
3721  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3722  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3723    &_markStack, CMSYield);
3724  // the last argument to iterate indicates whether the iteration
3725  // should be incremental with periodic yields.
3726  _markBitMap.iterate(&markFromRootsClosure);
3727  // If _restart_addr is non-NULL, a marking stack overflow
3728  // occurred; we need to do a fresh iteration from the
3729  // indicated restart address.
3730  while (_restart_addr != NULL) {
3731    if (_foregroundGCIsActive) {
3732      // We may be running into repeated stack overflows, having
3733      // reached the limit of the stack size, while making very
3734      // slow forward progress. It may be best to bail out and
3735      // let the foreground collector do its job.
3736      // Clear _restart_addr, so that foreground GC
3737      // works from scratch. This avoids the headache of
3738      // a "rescan" which would otherwise be needed because
3739      // of the dirty mod union table & card table.
3740      _restart_addr = NULL;
3741      return false;  // indicating failure to complete marking
3742    }
3743    // Deal with stack overflow:
3744    // we restart marking from _restart_addr
3745    HeapWord* ra = _restart_addr;
3746    markFromRootsClosure.reset(ra);
3747    _restart_addr = NULL;
3748    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3749  }
3750  return true;
3751}
3752
3753void CMSCollector::preclean() {
3754  check_correct_thread_executing();
3755  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3756  verify_work_stacks_empty();
3757  verify_overflow_empty();
3758  _abort_preclean = false;
3759  if (CMSPrecleaningEnabled) {
3760    if (!CMSEdenChunksRecordAlways) {
3761      _eden_chunk_index = 0;
3762    }
3763    size_t used = get_eden_used();
3764    size_t capacity = get_eden_capacity();
3765    // Don't start sampling unless we will get sufficiently
3766    // many samples.
3767    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3768                * CMSScheduleRemarkEdenPenetration)) {
3769      _start_sampling = true;
3770    } else {
3771      _start_sampling = false;
3772    }
3773    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3774    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3775    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3776  }
3777  CMSTokenSync x(true); // is cms thread
3778  if (CMSPrecleaningEnabled) {
3779    sample_eden();
3780    _collectorState = AbortablePreclean;
3781  } else {
3782    _collectorState = FinalMarking;
3783  }
3784  verify_work_stacks_empty();
3785  verify_overflow_empty();
3786}
3787
3788// Try to schedule the remark such that young gen
3789// occupancy is CMSScheduleRemarkEdenPenetration %.
3790void CMSCollector::abortable_preclean() {
3791  check_correct_thread_executing();
3792  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3793  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3794
3795  // If Eden's current occupancy is below this threshold,
3796  // immediately schedule the remark; else preclean
3797  // past the next scavenge in an effort to
3798  // schedule the pause as described above. By choosing
3799  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3800  // we will never do an actual abortable preclean cycle.
3801  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3802    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3803    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3804    // We need more smarts in the abortable preclean
3805    // loop below to deal with cases where allocation
3806    // in young gen is very very slow, and our precleaning
3807    // is running a losing race against a horde of
3808    // mutators intent on flooding us with CMS updates
3809    // (dirty cards).
3810    // One, admittedly dumb, strategy is to give up
3811    // after a certain number of abortable precleaning loops
3812    // or after a certain maximum time. We want to make
3813    // this smarter in the next iteration.
3814    // XXX FIX ME!!! YSR
3815    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3816    while (!(should_abort_preclean() ||
3817             ConcurrentMarkSweepThread::should_terminate())) {
3818      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3819      cumworkdone += workdone;
3820      loops++;
3821      // Voluntarily terminate abortable preclean phase if we have
3822      // been at it for too long.
3823      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3824          loops >= CMSMaxAbortablePrecleanLoops) {
3825        if (PrintGCDetails) {
3826          gclog_or_tty->print(" CMS: abort preclean due to loops ");
3827        }
3828        break;
3829      }
3830      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3831        if (PrintGCDetails) {
3832          gclog_or_tty->print(" CMS: abort preclean due to time ");
3833        }
3834        break;
3835      }
3836      // If we are doing little work each iteration, we should
3837      // take a short break.
3838      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3839        // Sleep for some time, waiting for work to accumulate
3840        stopTimer();
3841        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3842        startTimer();
3843        waited++;
3844      }
3845    }
3846    if (PrintCMSStatistics > 0) {
3847      gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3848                          loops, waited, cumworkdone);
3849    }
3850  }
3851  CMSTokenSync x(true); // is cms thread
3852  if (_collectorState != Idling) {
3853    assert(_collectorState == AbortablePreclean,
3854           "Spontaneous state transition?");
3855    _collectorState = FinalMarking;
3856  } // Else, a foreground collection completed this CMS cycle.
3857  return;
3858}
3859
3860// Respond to an Eden sampling opportunity
3861void CMSCollector::sample_eden() {
3862  // Make sure a young gc cannot sneak in between our
3863  // reading and recording of a sample.
3864  assert(Thread::current()->is_ConcurrentGC_thread(),
3865         "Only the cms thread may collect Eden samples");
3866  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3867         "Should collect samples while holding CMS token");
3868  if (!_start_sampling) {
3869    return;
3870  }
3871  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3872  // is populated by the young generation.
3873  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3874    if (_eden_chunk_index < _eden_chunk_capacity) {
3875      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3876      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3877             "Unexpected state of Eden");
3878      // We'd like to check that what we just sampled is an oop-start address;
3879      // however, we cannot do that here since the object may not yet have been
3880      // initialized. So we'll instead do the check when we _use_ this sample
3881      // later.
3882      if (_eden_chunk_index == 0 ||
3883          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3884                         _eden_chunk_array[_eden_chunk_index-1])
3885           >= CMSSamplingGrain)) {
3886        _eden_chunk_index++;  // commit sample
3887      }
3888    }
3889  }
3890  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3891    size_t used = get_eden_used();
3892    size_t capacity = get_eden_capacity();
3893    assert(used <= capacity, "Unexpected state of Eden");
3894    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3895      _abort_preclean = true;
3896    }
3897  }
3898}
3899
3900
3901size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3902  assert(_collectorState == Precleaning ||
3903         _collectorState == AbortablePreclean, "incorrect state");
3904  ResourceMark rm;
3905  HandleMark   hm;
3906
3907  // Precleaning is currently not MT but the reference processor
3908  // may be set for MT.  Disable it temporarily here.
3909  ReferenceProcessor* rp = ref_processor();
3910  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3911
3912  // Do one pass of scrubbing the discovered reference lists
3913  // to remove any reference objects with strongly-reachable
3914  // referents.
3915  if (clean_refs) {
3916    CMSPrecleanRefsYieldClosure yield_cl(this);
3917    assert(rp->span().equals(_span), "Spans should be equal");
3918    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3919                                   &_markStack, true /* preclean */);
3920    CMSDrainMarkingStackClosure complete_trace(this,
3921                                   _span, &_markBitMap, &_markStack,
3922                                   &keep_alive, true /* preclean */);
3923
3924    // We don't want this step to interfere with a young
3925    // collection because we don't want to take CPU
3926    // or memory bandwidth away from the young GC threads
3927    // (which may be as many as there are CPUs).
3928    // Note that we don't need to protect ourselves from
3929    // interference with mutators because they can't
3930    // manipulate the discovered reference lists nor affect
3931    // the computed reachability of the referents, the
3932    // only properties manipulated by the precleaning
3933    // of these reference lists.
3934    stopTimer();
3935    CMSTokenSyncWithLocks x(true /* is cms thread */,
3936                            bitMapLock());
3937    startTimer();
3938    sample_eden();
3939
3940    // The following will yield to allow foreground
3941    // collection to proceed promptly. XXX YSR:
3942    // The code in this method may need further
3943    // tweaking for better performance and some restructuring
3944    // for cleaner interfaces.
3945    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3946    rp->preclean_discovered_references(
3947          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3948          gc_timer, _gc_tracer_cm->gc_id());
3949  }
3950
3951  if (clean_survivor) {  // preclean the active survivor space(s)
3952    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3953                             &_markBitMap, &_modUnionTable,
3954                             &_markStack, true /* precleaning phase */);
3955    stopTimer();
3956    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3957                             bitMapLock());
3958    startTimer();
3959    unsigned int before_count =
3960      GenCollectedHeap::heap()->total_collections();
3961    SurvivorSpacePrecleanClosure
3962      sss_cl(this, _span, &_markBitMap, &_markStack,
3963             &pam_cl, before_count, CMSYield);
3964    _young_gen->from()->object_iterate_careful(&sss_cl);
3965    _young_gen->to()->object_iterate_careful(&sss_cl);
3966  }
3967  MarkRefsIntoAndScanClosure
3968    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3969             &_markStack, this, CMSYield,
3970             true /* precleaning phase */);
3971  // CAUTION: The following closure has persistent state that may need to
3972  // be reset upon a decrease in the sequence of addresses it
3973  // processes.
3974  ScanMarkedObjectsAgainCarefullyClosure
3975    smoac_cl(this, _span,
3976      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3977
3978  // Preclean dirty cards in ModUnionTable and CardTable using
3979  // appropriate convergence criterion;
3980  // repeat CMSPrecleanIter times unless we find that
3981  // we are losing.
3982  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3983  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3984         "Bad convergence multiplier");
3985  assert(CMSPrecleanThreshold >= 100,
3986         "Unreasonably low CMSPrecleanThreshold");
3987
3988  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3989  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3990       numIter < CMSPrecleanIter;
3991       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3992    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3993    if (Verbose && PrintGCDetails) {
3994      gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3995    }
3996    // Either there are very few dirty cards, so re-mark
3997    // pause will be small anyway, or our pre-cleaning isn't
3998    // that much faster than the rate at which cards are being
3999    // dirtied, so we might as well stop and re-mark since
4000    // precleaning won't improve our re-mark time by much.
4001    if (curNumCards <= CMSPrecleanThreshold ||
4002        (numIter > 0 &&
4003         (curNumCards * CMSPrecleanDenominator >
4004         lastNumCards * CMSPrecleanNumerator))) {
4005      numIter++;
4006      cumNumCards += curNumCards;
4007      break;
4008    }
4009  }
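
  // Worked example of the convergence test above (illustrative only,
  // using the usual defaults CMSPrecleanThreshold = 1000,
  // CMSPrecleanNumerator = 2 and CMSPrecleanDenominator = 3): if
  // successive iterations preclean 10000, 6000 and 4500 cards, the first
  // two passes continue because each count shrinks to at most 2/3 of its
  // predecessor (6000 * 3 <= 10000 * 2), but the third pass ends the loop
  // since 4500 * 3 > 6000 * 2, and no count ever fell to the 1000-card
  // threshold.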
4010
4011  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4012
4013  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4014  cumNumCards += curNumCards;
4015  if (PrintGCDetails && PrintCMSStatistics != 0) {
4016    gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4017                  curNumCards, cumNumCards, numIter);
4018  }
4019  return cumNumCards;   // as a measure of useful work done
4020}
4021
4022// PRECLEANING NOTES:
4023// Precleaning involves:
4024// . reading the bits of the modUnionTable and clearing the set bits.
4025// . For the cards corresponding to the set bits, we scan the
4026//   objects on those cards. This means we need the free_list_lock
4027//   so that we can safely iterate over the CMS space when scanning
4028//   for oops.
4029// . When we scan the objects, we'll be both reading and setting
4030//   marks in the marking bit map, so we'll need the marking bit map.
4031// . For protecting _collector_state transitions, we take the CGC_lock.
4032//   Note that any races in the reading of card table entries by the
4033//   CMS thread on the one hand, and the clearing of those entries by the
4034//   VM thread or the setting of those entries by the mutator threads on the
4035//   other, are quite benign. However, for efficiency it makes sense to keep
4036//   the VM thread from racing with the CMS thread while the latter is
4037//   transferring dirty card info to the modUnionTable. We therefore also use
4038//   the CGC_lock to protect the reading of the card table and the mod union
4039//   table by the CMS thread.
4040// . We run concurrently with mutator updates, so scanning
4041//   needs to be done carefully  -- we should not try to scan
4042//   potentially uninitialized objects.
4043//
4044// Locking strategy: While holding the CGC_lock, we scan over and
4045// reset a maximal dirty range of the mod union / card tables, then lock
4046// the free_list_lock and bitmap lock to do a full marking, then
4047// release these locks; and repeat the cycle. This allows for a
4048// certain amount of fairness in the sharing of these locks between
4049// the CMS collector on the one hand, and the VM thread and the
4050// mutators on the other.
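//
// A minimal sketch of that cycle (illustrative pseudocode only; the real
// control flow is in preclean_mod_union_table() and preclean_card_table()
// below):
//
//   while (there may be more dirty cards) {
//     with CGC_lock held:
//       scan and reset a maximal dirty range of the MUT / card table
//     with free_list_lock and bitmap lock held:
//       mark through the objects on the saved range
//     // all locks released here; the VM thread and mutators may interleave
//   }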
4051
4052// NOTE: preclean_mod_union_table() and preclean_card_table()
4053// further below are largely identical; if you need to modify
4054// one of these methods, please check the other method too.
4055
4056size_t CMSCollector::preclean_mod_union_table(
4057  ConcurrentMarkSweepGeneration* gen,
4058  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4059  verify_work_stacks_empty();
4060  verify_overflow_empty();
4061
4062  // strategy: starting with the first card, accumulate contiguous
4063  // ranges of dirty cards; clear these cards, then scan the region
4064  // covered by these cards.
4065
4066  // Since all of the MUT is committed ahead, we can just use
4067  // that, in case the generations expand while we are precleaning.
4068  // It might also be fine to just use the committed part of the
4069  // generation, but we might potentially miss cards when the
4070  // generation is rapidly expanding while we are in the midst
4071  // of precleaning.
4072  HeapWord* startAddr = gen->reserved().start();
4073  HeapWord* endAddr   = gen->reserved().end();
4074
4075  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4076
4077  size_t numDirtyCards, cumNumDirtyCards;
4078  HeapWord *nextAddr, *lastAddr;
4079  for (cumNumDirtyCards = numDirtyCards = 0,
4080       nextAddr = lastAddr = startAddr;
4081       nextAddr < endAddr;
4082       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4083
4084    ResourceMark rm;
4085    HandleMark   hm;
4086
4087    MemRegion dirtyRegion;
4088    {
4089      stopTimer();
4090      // Potential yield point
4091      CMSTokenSync ts(true);
4092      startTimer();
4093      sample_eden();
4094      // Get dirty region starting at nextAddr (inclusive),
4095      // simultaneously clearing it.
4096      dirtyRegion =
4097        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4098      assert(dirtyRegion.start() >= nextAddr,
4099             "returned region inconsistent?");
4100    }
4101    // Remember where the next search should begin.
4102    // The returned region (if non-empty) is a right open interval,
4103    // so lastAddr is obtained from the right end of that
4104    // interval.
4105    lastAddr = dirtyRegion.end();
4106    // Should do something more transparent and less hacky XXX
4107    numDirtyCards =
4108      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4109
4110    // We'll scan the cards in the dirty region (with periodic
4111    // yields for foreground GC as needed).
4112    if (!dirtyRegion.is_empty()) {
4113      assert(numDirtyCards > 0, "consistency check");
4114      HeapWord* stop_point = NULL;
4115      stopTimer();
4116      // Potential yield point
4117      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4118                               bitMapLock());
4119      startTimer();
4120      {
4121        verify_work_stacks_empty();
4122        verify_overflow_empty();
4123        sample_eden();
4124        stop_point =
4125          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4126      }
4127      if (stop_point != NULL) {
4128        // The careful iteration stopped early either because it found an
4129        // uninitialized object, or because we were in the midst of an
4130        // "abortable preclean", which should now be aborted. Redirty
4131        // the bits corresponding to the partially-scanned or unscanned
4132        // cards. We'll either restart at the next block boundary or
4133        // abort the preclean.
4134        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4135               "Should only be AbortablePreclean.");
4136        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4137        if (should_abort_preclean()) {
4138          break; // out of preclean loop
4139        } else {
4140          // Compute the next address at which preclean should pick up;
4141          // might need bitMapLock in order to read P-bits.
4142          lastAddr = next_card_start_after_block(stop_point);
4143        }
4144      }
4145    } else {
4146      assert(lastAddr == endAddr, "consistency check");
4147      assert(numDirtyCards == 0, "consistency check");
4148      break;
4149    }
4150  }
4151  verify_work_stacks_empty();
4152  verify_overflow_empty();
4153  return cumNumDirtyCards;
4154}
4155
4156// NOTE: preclean_mod_union_table() above and preclean_card_table()
4157// below are largely identical; if you need to modify
4158// one of these methods, please check the other method too.
4159
4160size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4161  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4162  // strategy: it's similar to preclean_mod_union_table above, in that
4163  // we accumulate contiguous ranges of dirty cards, mark these cards
4164  // precleaned, then scan the region covered by these cards.
4165  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4166  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4167
4168  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4169
4170  size_t numDirtyCards, cumNumDirtyCards;
4171  HeapWord *lastAddr, *nextAddr;
4172
4173  for (cumNumDirtyCards = numDirtyCards = 0,
4174       nextAddr = lastAddr = startAddr;
4175       nextAddr < endAddr;
4176       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4177
4178    ResourceMark rm;
4179    HandleMark   hm;
4180
4181    MemRegion dirtyRegion;
4182    {
4183      // See comments in "Precleaning notes" above on why we
4184      // do this locking. XXX Could the locking overheads be
4185      // too high when dirty cards are sparse? [I don't think so.]
4186      stopTimer();
4187      CMSTokenSync x(true); // is cms thread
4188      startTimer();
4189      sample_eden();
4190      // Get and clear dirty region from card table
4191      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4192                                    MemRegion(nextAddr, endAddr),
4193                                    true,
4194                                    CardTableModRefBS::precleaned_card_val());
4195
4196      assert(dirtyRegion.start() >= nextAddr,
4197             "returned region inconsistent?");
4198    }
4199    lastAddr = dirtyRegion.end();
4200    numDirtyCards =
4201      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4202
4203    if (!dirtyRegion.is_empty()) {
4204      stopTimer();
4205      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4206      startTimer();
4207      sample_eden();
4208      verify_work_stacks_empty();
4209      verify_overflow_empty();
4210      HeapWord* stop_point =
4211        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4212      if (stop_point != NULL) {
4213        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4214               "Should only be AbortablePreclean.");
4215        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4216        if (should_abort_preclean()) {
4217          break; // out of preclean loop
4218        } else {
4219          // Compute the next address at which preclean should pick up.
4220          lastAddr = next_card_start_after_block(stop_point);
4221        }
4222      }
4223    } else {
4224      break;
4225    }
4226  }
4227  verify_work_stacks_empty();
4228  verify_overflow_empty();
4229  return cumNumDirtyCards;
4230}
4231
4232class PrecleanKlassClosure : public KlassClosure {
4233  KlassToOopClosure _cm_klass_closure;
4234 public:
4235  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4236  void do_klass(Klass* k) {
4237    if (k->has_accumulated_modified_oops()) {
4238      k->clear_accumulated_modified_oops();
4239
4240      _cm_klass_closure.do_klass(k);
4241    }
4242  }
4243};
4244
4245// The freelist lock is needed to prevent asserts; is it really needed?
4246void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4247
4248  cl->set_freelistLock(freelistLock);
4249
4250  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4251
4252  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4253  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4254  PrecleanKlassClosure preclean_klass_closure(cl);
4255  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4256
4257  verify_work_stacks_empty();
4258  verify_overflow_empty();
4259}
4260
4261void CMSCollector::checkpointRootsFinal() {
4262  assert(_collectorState == FinalMarking, "incorrect state transition?");
4263  check_correct_thread_executing();
4264  // world is stopped at this checkpoint
4265  assert(SafepointSynchronize::is_at_safepoint(),
4266         "world should be stopped");
4267  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4268
4269  verify_work_stacks_empty();
4270  verify_overflow_empty();
4271
4272  if (PrintGCDetails) {
4273    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4274                        _young_gen->used() / K,
4275                        _young_gen->capacity() / K);
4276  }
4277  {
4278    if (CMSScavengeBeforeRemark) {
4279      GenCollectedHeap* gch = GenCollectedHeap::heap();
4280      // Temporarily set the flag to false; GCH->do_collection
4281      // expects it to be false and will set it to true.
4282      FlagSetting fl(gch->_is_gc_active, false);
4283      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4284        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4285      int level = _cmsGen->level() - 1;
4286      if (level >= 0) {
4287        gch->do_collection(true,        // full (i.e. force, see below)
4288                           false,       // !clear_all_soft_refs
4289                           0,           // size
4290                           false,       // is_tlab
4291                           level        // max_level
4292                          );
4293      }
4294    }
4295    FreelistLocker x(this);
4296    MutexLockerEx y(bitMapLock(),
4297                    Mutex::_no_safepoint_check_flag);
4298    checkpointRootsFinalWork();
4299  }
4300  verify_work_stacks_empty();
4301  verify_overflow_empty();
4302}
4303
4304void CMSCollector::checkpointRootsFinalWork() {
4305  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4306
4307  assert(haveFreelistLocks(), "must have free list locks");
4308  assert_lock_strong(bitMapLock());
4309
4310  ResourceMark rm;
4311  HandleMark   hm;
4312
4313  GenCollectedHeap* gch = GenCollectedHeap::heap();
4314
4315  if (should_unload_classes()) {
4316    CodeCache::gc_prologue();
4317  }
4318  assert(haveFreelistLocks(), "must have free list locks");
4319  assert_lock_strong(bitMapLock());
4320
4321  // We might assume that we need not fill TLAB's when
4322  // CMSScavengeBeforeRemark is set, because we may have just done
4323  // a scavenge which would have filled all TLAB's -- and besides
4324  // Eden would be empty. This however may not always be the case --
4325  // for instance although we asked for a scavenge, it may not have
4326  // happened because of a JNI critical section. We probably need
4327  // a policy for deciding whether we can in that case wait until
4328  // the critical section releases and then do the remark following
4329  // the scavenge, and skip it here. In the absence of that policy,
4330  // or of an indication of whether the scavenge did indeed occur,
4331  // we cannot rely on TLAB's having been filled and must do
4332  // so here just in case a scavenge did not happen.
4333  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4334  // Update the saved marks which may affect the root scans.
4335  gch->save_marks();
4336
4337  if (CMSPrintEdenSurvivorChunks) {
4338    print_eden_and_survivor_chunk_arrays();
4339  }
4340
4341  {
4342    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4343
4344    // Note on the role of the mod union table:
4345    // Since the marker in "markFromRoots" marks concurrently with
4346    // mutators, it is possible for some reachable objects not to have been
4347    // scanned. For instance, an only reference to an object A was
4348    // placed in object B after the marker scanned B. Unless B is rescanned,
4349    // A would be collected. Such updates to references in marked objects
4350    // are detected via the mod union table which is the set of all cards
4351    // dirtied since the first checkpoint in this GC cycle and prior to
4352    // the most recent young generation GC, minus those cleaned up by the
4353    // concurrent precleaning.
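    //
    // Schematically (an illustrative timeline, not new machinery):
    //   t0: the concurrent marker scans B; B holds no reference to A yet
    //   t1: a mutator stores the only reference to A into B, dirtying the
    //       card covering B; that card is later recorded in the mod union
    //       table
    //   t2: remark rescans the objects on mod-union-dirty cards, revisits
    //       B, and marks A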
4354    if (CMSParallelRemarkEnabled) {
4355      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4356      do_remark_parallel();
4357    } else {
4358      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4359                  _gc_timer_cm, _gc_tracer_cm->gc_id());
4360      do_remark_non_parallel();
4361    }
4362  }
4363  verify_work_stacks_empty();
4364  verify_overflow_empty();
4365
4366  {
4367    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4368    refProcessingWork();
4369  }
4370  verify_work_stacks_empty();
4371  verify_overflow_empty();
4372
4373  if (should_unload_classes()) {
4374    CodeCache::gc_epilogue();
4375  }
4376  JvmtiExport::gc_epilogue();
4377
4378  // If we encountered any (marking stack / work queue) overflow
4379  // events during the current CMS cycle, take appropriate
4380  // remedial measures, where possible, so as to try and avoid
4381  // recurrence of that condition.
4382  assert(_markStack.isEmpty(), "No grey objects");
4383  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4384                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4385  if (ser_ovflw > 0) {
4386    if (PrintCMSStatistics != 0) {
4387      gclog_or_tty->print_cr("Marking stack overflow (benign) "
4388        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4389        ", kac_preclean="SIZE_FORMAT")",
4390        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4391        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4392    }
4393    _markStack.expand();
4394    _ser_pmc_remark_ovflw = 0;
4395    _ser_pmc_preclean_ovflw = 0;
4396    _ser_kac_preclean_ovflw = 0;
4397    _ser_kac_ovflw = 0;
4398  }
4399  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4400    if (PrintCMSStatistics != 0) {
4401      gclog_or_tty->print_cr("Work queue overflow (benign) "
4402        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4403        _par_pmc_remark_ovflw, _par_kac_ovflw);
4404    }
4405    _par_pmc_remark_ovflw = 0;
4406    _par_kac_ovflw = 0;
4407  }
4408  if (PrintCMSStatistics != 0) {
4409     if (_markStack._hit_limit > 0) {
4410       gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4411                              _markStack._hit_limit);
4412     }
4413     if (_markStack._failed_double > 0) {
4414       gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4415                              " current capacity "SIZE_FORMAT,
4416                              _markStack._failed_double,
4417                              _markStack.capacity());
4418     }
4419  }
4420  _markStack._hit_limit = 0;
4421  _markStack._failed_double = 0;
4422
4423  if ((VerifyAfterGC || VerifyDuringGC) &&
4424      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4425    verify_after_remark();
4426  }
4427
4428  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4429
4430  // Change under the freelistLocks.
4431  _collectorState = Sweeping;
4432  // Call isAllClear() under bitMapLock
4433  assert(_modUnionTable.isAllClear(),
4434      "Should be clear by end of the final marking");
4435  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4436      "Should be clear by end of the final marking");
4437}
4438
4439void CMSParInitialMarkTask::work(uint worker_id) {
4440  elapsedTimer _timer;
4441  ResourceMark rm;
4442  HandleMark   hm;
4443
4444  // ---------- scan from roots --------------
4445  _timer.start();
4446  GenCollectedHeap* gch = GenCollectedHeap::heap();
4447  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4448
4449  // ---------- young gen roots --------------
4450  {
4451    work_on_young_gen_roots(worker_id, &par_mri_cl);
4452    _timer.stop();
4453    if (PrintCMSStatistics != 0) {
4454      gclog_or_tty->print_cr(
4455        "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4456        worker_id, _timer.seconds());
4457    }
4458  }
4459
4460  // ---------- remaining roots --------------
4461  _timer.reset();
4462  _timer.start();
4463
4464  CLDToOopClosure cld_closure(&par_mri_cl, true);
4465
4466  gch->gen_process_roots(_strong_roots_scope,
4467                         _collector->_cmsGen->level(),
4468                         false,     // yg was scanned above
4469                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4470                         _collector->should_unload_classes(),
4471                         &par_mri_cl,
4472                         NULL,
4473                         &cld_closure);
4474  assert(_collector->should_unload_classes()
4475         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4476         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4477  _timer.stop();
4478  if (PrintCMSStatistics != 0) {
4479    gclog_or_tty->print_cr(
4480      "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4481      worker_id, _timer.seconds());
4482  }
4483}
4484
4485// Parallel remark task
4486class CMSParRemarkTask: public CMSParMarkTask {
4487  CompactibleFreeListSpace* _cms_space;
4488
4489  // The per-thread work queues, available here for stealing.
4490  OopTaskQueueSet*       _task_queues;
4491  ParallelTaskTerminator _term;
4492  StrongRootsScope*      _strong_roots_scope;
4493
4494 public:
4495  // A value of 0 passed to n_workers will cause the number of
4496  // workers to be taken from the active workers in the work gang.
4497  CMSParRemarkTask(CMSCollector* collector,
4498                   CompactibleFreeListSpace* cms_space,
4499                   uint n_workers, FlexibleWorkGang* workers,
4500                   OopTaskQueueSet* task_queues,
4501                   StrongRootsScope* strong_roots_scope):
4502    CMSParMarkTask("Rescan roots and grey objects in parallel",
4503                   collector, n_workers),
4504    _cms_space(cms_space),
4505    _task_queues(task_queues),
4506    _term(n_workers, task_queues),
4507    _strong_roots_scope(strong_roots_scope) { }
4508
4509  OopTaskQueueSet* task_queues() { return _task_queues; }
4510
4511  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4512
4513  ParallelTaskTerminator* terminator() { return &_term; }
4514  uint n_workers() { return _n_workers; }
4515
4516  void work(uint worker_id);
4517
4518 private:
4519  // ... of dirty cards in old space
4520  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4521                                  Par_MarkRefsIntoAndScanClosure* cl);
4522
4523  // ... work stealing for the above
4524  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4525};
4526
4527class RemarkKlassClosure : public KlassClosure {
4528  KlassToOopClosure _cm_klass_closure;
4529 public:
4530  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4531  void do_klass(Klass* k) {
4532    // Check if we have modified any oops in the Klass during the concurrent marking.
4533    if (k->has_accumulated_modified_oops()) {
4534      k->clear_accumulated_modified_oops();
4535
4536      // We could have transferred the current modified marks to the accumulated marks,
4537      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4538    } else if (k->has_modified_oops()) {
4539      // Don't clear anything, this info is needed by the next young collection.
4540    } else {
4541      // No modified oops in the Klass.
4542      return;
4543    }
4544
4545    // The klass has modified fields, need to scan the klass.
4546    _cm_klass_closure.do_klass(k);
4547  }
4548};
4549
4550void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4551  ParNewGeneration* young_gen = _collector->_young_gen;
4552  ContiguousSpace* eden_space = young_gen->eden();
4553  ContiguousSpace* from_space = young_gen->from();
4554  ContiguousSpace* to_space   = young_gen->to();
4555
4556  HeapWord** eca = _collector->_eden_chunk_array;
4557  size_t     ect = _collector->_eden_chunk_index;
4558  HeapWord** sca = _collector->_survivor_chunk_array;
4559  size_t     sct = _collector->_survivor_chunk_index;
4560
4561  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4562  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4563
4564  do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4565  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4566  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4567}
4568
4569// work_queue(i) is passed to the closure
4570// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4571// is also passed to do_dirty_card_rescan_tasks() and to
4572// do_work_steal() to select the i-th task_queue.
4573
4574void CMSParRemarkTask::work(uint worker_id) {
4575  elapsedTimer _timer;
4576  ResourceMark rm;
4577  HandleMark   hm;
4578
4579  // ---------- rescan from roots --------------
4580  _timer.start();
4581  GenCollectedHeap* gch = GenCollectedHeap::heap();
4582  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4583    _collector->_span, _collector->ref_processor(),
4584    &(_collector->_markBitMap),
4585    work_queue(worker_id));
4586
4587  // Rescan young gen roots first since these are likely
4588  // coarsely partitioned and may, on that account, constitute
4589  // the critical path; thus, it's best to start off that
4590  // work first.
4591  // ---------- young gen roots --------------
4592  {
4593    work_on_young_gen_roots(worker_id, &par_mrias_cl);
4594    _timer.stop();
4595    if (PrintCMSStatistics != 0) {
4596      gclog_or_tty->print_cr(
4597        "Finished young gen rescan work in %dth thread: %3.3f sec",
4598        worker_id, _timer.seconds());
4599    }
4600  }
4601
4602  // ---------- remaining roots --------------
4603  _timer.reset();
4604  _timer.start();
4605  gch->gen_process_roots(_strong_roots_scope,
4606                         _collector->_cmsGen->level(),
4607                         false,     // yg was scanned above
4608                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4609                         _collector->should_unload_classes(),
4610                         &par_mrias_cl,
4611                         NULL,
4612                         NULL);     // The dirty klasses will be handled below
4613
4614  assert(_collector->should_unload_classes()
4615         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4616         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4617  _timer.stop();
4618  if (PrintCMSStatistics != 0) {
4619    gclog_or_tty->print_cr(
4620      "Finished remaining root rescan work in %dth thread: %3.3f sec",
4621      worker_id, _timer.seconds());
4622  }
4623
4624  // ---------- unhandled CLD scanning ----------
4625  if (worker_id == 0) { // Single threaded at the moment.
4626    _timer.reset();
4627    _timer.start();
4628
4629    // Scan all new class loader data objects and new dependencies that were
4630    // introduced during concurrent marking.
4631    ResourceMark rm;
4632    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4633    for (int i = 0; i < array->length(); i++) {
4634      par_mrias_cl.do_class_loader_data(array->at(i));
4635    }
4636
4637    // We don't need to keep track of new CLDs anymore.
4638    ClassLoaderDataGraph::remember_new_clds(false);
4639
4640    _timer.stop();
4641    if (PrintCMSStatistics != 0) {
4642      gclog_or_tty->print_cr(
4643          "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4644          worker_id, _timer.seconds());
4645    }
4646  }
4647
4648  // ---------- dirty klass scanning ----------
4649  if (worker_id == 0) { // Single threaded at the moment.
4650    _timer.reset();
4651    _timer.start();
4652
4653    // Scan all classes that were dirtied during the concurrent marking phase.
4654    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4655    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4656
4657    _timer.stop();
4658    if (PrintCMSStatistics != 0) {
4659      gclog_or_tty->print_cr(
4660          "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4661          worker_id, _timer.seconds());
4662    }
4663  }
4664
4665  // We might have added oops to ClassLoaderData::_handles during the
4666  // concurrent marking phase. These oops point to newly allocated objects
4667  // that are guaranteed to be kept alive, either by the direct allocation
4668  // code or when the young collector processes the roots. Hence,
4669  // we don't have to revisit the _handles block during the remark phase.
4670
4671  // ---------- rescan dirty cards ------------
4672  _timer.reset();
4673  _timer.start();
4674
4675  // Do the rescan tasks for each of the two spaces
4676  // (cms_space) in turn.
4677  // "worker_id" is passed to select the task_queue for "worker_id"
4678  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4679  _timer.stop();
4680  if (PrintCMSStatistics != 0) {
4681    gclog_or_tty->print_cr(
4682      "Finished dirty card rescan work in %dth thread: %3.3f sec",
4683      worker_id, _timer.seconds());
4684  }
4685
4686  // ---------- steal work from other threads ...
4687  // ---------- ... and drain overflow list.
4688  _timer.reset();
4689  _timer.start();
4690  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4691  _timer.stop();
4692  if (PrintCMSStatistics != 0) {
4693    gclog_or_tty->print_cr(
4694      "Finished work stealing in %dth thread: %3.3f sec",
4695      worker_id, _timer.seconds());
4696  }
4697}
4698
4699// Note that the worker_id parameter is not used.
4700void
4701CMSParMarkTask::do_young_space_rescan(uint worker_id,
4702  OopsInGenClosure* cl, ContiguousSpace* space,
4703  HeapWord** chunk_array, size_t chunk_top) {
4704  // Until all tasks completed:
4705  // . claim an unclaimed task
4706  // . compute region boundaries corresponding to task claimed
4707  //   using chunk_array
4708  // . par_oop_iterate(cl) over that region
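  //
  // For concreteness (illustrative example only): with chunk_array = {c0, c1}
  // and chunk_top = 2, the claimable tasks are the regions
  //   [bottom, c0), [c0, c1) and [c1, top)
  // i.e. chunk_top boundaries always yield chunk_top + 1 tasks, matching
  // the n_tasks values set up in
  // initialize_sequential_subtasks_for_young_gen_rescan() below.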
4709
4710  ResourceMark rm;
4711  HandleMark   hm;
4712
4713  SequentialSubTasksDone* pst = space->par_seq_tasks();
4714
4715  uint nth_task = 0;
4716  uint n_tasks  = pst->n_tasks();
4717
4718  if (n_tasks > 0) {
4719    assert(pst->valid(), "Uninitialized use?");
4720    HeapWord *start, *end;
4721    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4722      // We claimed task # nth_task; compute its boundaries.
4723      if (chunk_top == 0) {  // no samples were taken
4724        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4725        start = space->bottom();
4726        end   = space->top();
4727      } else if (nth_task == 0) {
4728        start = space->bottom();
4729        end   = chunk_array[nth_task];
4730      } else if (nth_task < (uint)chunk_top) {
4731        assert(nth_task >= 1, "Control point invariant");
4732        start = chunk_array[nth_task - 1];
4733        end   = chunk_array[nth_task];
4734      } else {
4735        assert(nth_task == (uint)chunk_top, "Control point invariant");
4736        start = chunk_array[chunk_top - 1];
4737        end   = space->top();
4738      }
4739      MemRegion mr(start, end);
4740      // Verify that mr is in space
4741      assert(mr.is_empty() || space->used_region().contains(mr),
4742             "Should be in space");
4743      // Verify that "start" is an object boundary
4744      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4745             "Should be an oop");
4746      space->par_oop_iterate(mr, cl);
4747    }
4748    pst->all_tasks_completed();
4749  }
4750}
4751
4752void
4753CMSParRemarkTask::do_dirty_card_rescan_tasks(
4754  CompactibleFreeListSpace* sp, int i,
4755  Par_MarkRefsIntoAndScanClosure* cl) {
4756  // Until all tasks completed:
4757  // . claim an unclaimed task
4758  // . compute region boundaries corresponding to task claimed
4759  // . transfer dirty bits ct->mut for that region
4760  // . apply rescanclosure to dirty mut bits for that region
4761
4762  ResourceMark rm;
4763  HandleMark   hm;
4764
4765  OopTaskQueue* work_q = work_queue(i);
4766  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4767  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4768  // CAUTION: This closure has state that persists across calls to
4769  // the work method dirty_range_iterate_clear() in that it has
4770  // embedded in it a (subtype of) UpwardsObjectClosure. The
4771  // use of that state in the embedded UpwardsObjectClosure instance
4772  // assumes that the cards are always iterated (even if in parallel
4773  // by several threads) in monotonically increasing order per each
4774  // thread. This is true of the implementation below which picks
4775  // card ranges (chunks) in monotonically increasing order globally
4776  // and, a-fortiori, in monotonically increasing order per thread
4777  // (the latter order being a subsequence of the former).
4778  // If the work code below is ever reorganized into a more chaotic
4779  // work-partitioning form than the current "sequential tasks"
4780  // paradigm, the use of that persistent state will have to be
4781  // revisited and modified appropriately. See also related
4782  // bug 4756801 work on which should examine this code to make
4783  // sure that the changes there do not run counter to the
4784  // assumptions made here and necessary for correctness and
4785  // efficiency. Note also that this code might yield inefficient
4786  // behavior in the case of very large objects that span one or
4787  // more work chunks. Such objects would potentially be scanned
4788  // several times redundantly. Work on 4756801 should try and
4789  // address that performance anomaly if at all possible. XXX
4790  MemRegion  full_span  = _collector->_span;
4791  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4792  MarkFromDirtyCardsClosure
4793    greyRescanClosure(_collector, full_span, // entire span of interest
4794                      sp, bm, work_q, cl);
4795
4796  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4797  assert(pst->valid(), "Uninitialized use?");
4798  uint nth_task = 0;
4799  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4800  MemRegion span = sp->used_region();
4801  HeapWord* start_addr = span.start();
4802  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4803                                           alignment);
4804  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4805  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4806         start_addr, "Check alignment");
4807  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4808         chunk_size, "Check alignment");
4809
4810  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4811    // Having claimed the nth_task, compute corresponding mem-region,
4812    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4813    // The alignment restriction ensures that we do not need any
4814    // synchronization with other gang-workers while setting or
4815    // clearing bits in this chunk of the MUT.
4816    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4817                                    start_addr + (nth_task+1)*chunk_size);
4818    // The last chunk's end might be way beyond end of the
4819    // used region. In that case pull back appropriately.
4820    if (this_span.end() > end_addr) {
4821      this_span.set_end(end_addr);
4822      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4823    }
4824    // Iterate over the dirty cards covering this chunk, marking them
4825    // precleaned, and setting the corresponding bits in the mod union
4826    // table. Since we have been careful to partition at Card and MUT-word
4827    // boundaries no synchronization is needed between parallel threads.
4828    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4829                                                 &modUnionClosure);
4830
4831    // Having transferred these marks into the modUnionTable,
4832    // rescan the marked objects on the dirty cards in the modUnionTable.
4833    // Even if this is at a synchronous collection, the initial marking
4834    // may have been done during an asynchronous collection so there
4835    // may be dirty bits in the mod-union table.
4836    _collector->_modUnionTable.dirty_range_iterate_clear(
4837                  this_span, &greyRescanClosure);
4838    _collector->_modUnionTable.verifyNoOneBitsInRange(
4839                                 this_span.start(),
4840                                 this_span.end());
4841  }
4842  pst->all_tasks_completed();  // declare that I am done
4843}
4844
4845// . see if we can share work_queues with ParNew? XXX
4846void
4847CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4848                                int* seed) {
4849  OopTaskQueue* work_q = work_queue(i);
4850  NOT_PRODUCT(int num_steals = 0;)
4851  oop obj_to_scan;
4852  CMSBitMap* bm = &(_collector->_markBitMap);
4853
4854  while (true) {
4855    // Completely finish any left over work from (an) earlier round(s)
4856    cl->trim_queue(0);
4857    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4858                                         (size_t)ParGCDesiredObjsFromOverflowList);
4859    // Now check if there's any work in the overflow list
4860    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4861    // only affects the number of attempts made to get work from the
4862    // overflow list and does not affect the number of workers.  Just
4863    // pass ParallelGCThreads so this behavior is unchanged.
4864    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4865                                                work_q,
4866                                                ParallelGCThreads)) {
4867      // found something in global overflow list;
4868      // not yet ready to go stealing work from others.
4869      // We'd like to assert(work_q->size() != 0, ...)
4870      // because we just took work from the overflow list,
4871      // but of course we can't since all of that could have
4872      // been already stolen from us.
4873      // "He giveth and He taketh away."
4874      continue;
4875    }
4876    // Verify that we have no work before we resort to stealing
4877    assert(work_q->size() == 0, "Have work, shouldn't steal");
4878    // Try to steal from other queues that have work
4879    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4880      NOT_PRODUCT(num_steals++;)
4881      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4882      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4883      // Do scanning work
4884      obj_to_scan->oop_iterate(cl);
4885      // Loop around, finish this work, and try to steal some more
4886    } else if (terminator()->offer_termination()) {
4887        break;  // nirvana from the infinite cycle
4888    }
4889  }
4890  NOT_PRODUCT(
4891    if (PrintCMSStatistics != 0) {
4892      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4893    }
4894  )
4895  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4896         "Else our work is not yet done");
4897}
4898
4899// If CMSEdenChunksRecordAlways is true, record object boundaries in
4900// _eden_chunk_array by sampling the eden top in the slow-path eden
4901// object allocation code path. If CMSEdenChunksRecordAlways is
4902// false, we rely instead on the asynchronous sampling that
4903// sample_eden() performs during the (abortable) preclean phase of
4904// the CMS cycle.
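// For example (illustrative only; 64 is an assumed value, not the
// default): with CMSSamplingGrain = 64 heap words, a sample is committed
// only once eden top has advanced at least 64 words beyond the previously
// committed sample (the very first sample is always committed); a closer
// sample is overwritten in place by the next call instead.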
4905void CMSCollector::sample_eden_chunk() {
4906  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4907    if (_eden_chunk_lock->try_lock()) {
4908      // Record a sample. This is the critical section. The contents
4909      // of the _eden_chunk_array have to be non-decreasing in the
4910      // address order.
4911      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4912      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4913             "Unexpected state of Eden");
4914      if (_eden_chunk_index == 0 ||
4915          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4916           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4917                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4918        _eden_chunk_index++;  // commit sample
4919      }
4920      _eden_chunk_lock->unlock();
4921    }
4922  }
4923}
4924
4925// Return a thread-local PLAB recording array, as appropriate.
4926void* CMSCollector::get_data_recorder(int thr_num) {
4927  if (_survivor_plab_array != NULL &&
4928      (CMSPLABRecordAlways ||
4929       (_collectorState > Marking && _collectorState < FinalMarking))) {
4930    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4931    ChunkArray* ca = &_survivor_plab_array[thr_num];
4932    ca->reset();   // clear it so that fresh data is recorded
4933    return (void*) ca;
4934  } else {
4935    return NULL;
4936  }
4937}
4938
4939// Reset all the thread-local PLAB recording arrays
4940void CMSCollector::reset_survivor_plab_arrays() {
4941  for (uint i = 0; i < ParallelGCThreads; i++) {
4942    _survivor_plab_array[i].reset();
4943  }
4944}
4945
4946// Merge the per-thread plab arrays into the global survivor chunk
4947// array which will provide the partitioning of the survivor space
4948// for CMS initial scan and rescan.
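//
// The loop below is an N-way merge of the sorted per-thread PLAB arrays,
// analogous to the merge step of mergesort: each round picks the smallest
// not-yet-consumed address across all threads' arrays and appends it to
// _survivor_chunk_array. For example (illustrative only), merging
// per-thread arrays {a, d} and {b, c}, where a < b < c < d, yields
// {a, b, c, d}.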
4949void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4950                                              int no_of_gc_threads) {
4951  assert(_survivor_plab_array  != NULL, "Error");
4952  assert(_survivor_chunk_array != NULL, "Error");
4953  assert(_collectorState == FinalMarking ||
4954         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4955  for (int j = 0; j < no_of_gc_threads; j++) {
4956    _cursor[j] = 0;
4957  }
4958  HeapWord* top = surv->top();
4959  size_t i;
4960  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4961    HeapWord* min_val = top;          // Higher than any PLAB address
4962    uint      min_tid = 0;            // position of min_val this round
4963    for (int j = 0; j < no_of_gc_threads; j++) {
4964      ChunkArray* cur_sca = &_survivor_plab_array[j];
4965      if (_cursor[j] == cur_sca->end()) {
4966        continue;
4967      }
4968      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4969      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4970      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4971      if (cur_val < min_val) {
4972        min_tid = j;
4973        min_val = cur_val;
4974      } else {
4975        assert(cur_val < top, "All recorded addresses should be less");
4976      }
4977    }
4978    // At this point min_val and min_tid are respectively
4979    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4980    // and the thread (j) that witnesses that address.
4981    // We record this address in the _survivor_chunk_array[i]
4982    // and increment _cursor[min_tid] prior to the next round i.
4983    if (min_val == top) {
4984      break;
4985    }
4986    _survivor_chunk_array[i] = min_val;
4987    _cursor[min_tid]++;
4988  }
4989  // We are all done; record the size of the _survivor_chunk_array
4990  _survivor_chunk_index = i; // exclusive: [0, i)
4991  if (PrintCMSStatistics > 0) {
4992    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
4993  }
4994  // Verify that we used up all the recorded entries
4995  #ifdef ASSERT
4996    size_t total = 0;
4997    for (int j = 0; j < no_of_gc_threads; j++) {
4998      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4999      total += _cursor[j];
5000    }
5001    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5002    // Check that the merged array is in sorted order
5003    if (total > 0) {
5004      for (size_t i = 0; i < total - 1; i++) {
5005        if (PrintCMSStatistics > 0) {
5006          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5007                              i, p2i(_survivor_chunk_array[i]));
5008        }
5009        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5010               "Not sorted");
5011      }
5012    }
5013  #endif // ASSERT
5014}
5015
5016// Set up the space's par_seq_tasks structure for work claiming
5017// for parallel initial scan and rescan of young gen.
5018// See ParRescanTask where this is currently used.
5019void
5020CMSCollector::
5021initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5022  assert(n_threads > 0, "Unexpected n_threads argument");
5023
5024  // Eden space
5025  if (!_young_gen->eden()->is_empty()) {
5026    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5027    assert(!pst->valid(), "Clobbering existing data?");
5028    // The samples in [0, _eden_chunk_index) delimit _eden_chunk_index + 1 tasks.
5029    size_t n_tasks = _eden_chunk_index + 1;
5030    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5031    // Sets the condition for completion of the subtask (how many threads
5032    // need to finish in order to be done).
5033    pst->set_n_threads(n_threads);
5034    pst->set_n_tasks((int)n_tasks);
5035  }
5036
5037  // Merge the survivor plab arrays into _survivor_chunk_array
5038  if (_survivor_plab_array != NULL) {
5039    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5040  } else {
5041    assert(_survivor_chunk_index == 0, "Error");
5042  }
5043
5044  // To space
5045  {
5046    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5047    assert(!pst->valid(), "Clobbering existing data?");
5048    // Sets the condition for completion of the subtask (how many threads
5049    // need to finish in order to be done).
5050    pst->set_n_threads(n_threads);
5051    pst->set_n_tasks(1);
5052    assert(pst->valid(), "Error");
5053  }
5054
5055  // From space
5056  {
5057    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5058    assert(!pst->valid(), "Clobbering existing data?");
5059    size_t n_tasks = _survivor_chunk_index + 1;
5060    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5061    // Sets the condition for completion of the subtask (how many threads
5062    // need to finish in order to be done).
5063    pst->set_n_threads(n_threads);
5064    pst->set_n_tasks((int)n_tasks);
5065    assert(pst->valid(), "Error");
5066  }
5067}
5068
5069// Parallel version of remark
5070void CMSCollector::do_remark_parallel() {
5071  GenCollectedHeap* gch = GenCollectedHeap::heap();
5072  FlexibleWorkGang* workers = gch->workers();
5073  assert(workers != NULL, "Need parallel worker threads.");
5074  // Choose to use the number of GC workers most recently set
5075  // into "active_workers".
5076  uint n_workers = workers->active_workers();
5077
5078  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5079
5080  StrongRootsScope srs(n_workers);
5081
5082  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5083
5084  // We won't be iterating over the cards in the card table updating
5085  // the younger_gen cards, so we shouldn't call the following, else
5086  // the verification code as well as subsequent younger_refs_iterate
5087  // code would get confused. XXX
5088  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089
5090  // The young gen rescan work will not be done as part of
5091  // process_roots (which currently doesn't know how to
5092  // parallelize such a scan), but rather will be broken up into
5093  // a set of parallel tasks (via the sampling that the [abortable]
5094  // preclean phase did of eden), plus the [two] tasks of
5095  // scanning the [two] survivor spaces. Further fine-grain
5096  // parallelization of the scanning of the survivor spaces
5097  // themselves, and of precleaning of the younger gen itself
5098  // is deferred to the future.
5099  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5100
5101  // The dirty card rescan work is broken up into a "sequence"
5102  // of parallel tasks (per constituent space) that are dynamically
5103  // claimed by the parallel threads.
5104  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5105
5106  // It turns out that even when we're using 1 thread, doing the work in a
5107  // separate thread causes wide variance in run times.  We can't help this
5108  // in the multi-threaded case, but we special-case n=1 here to get
5109  // repeatable measurements of the 1-thread overhead of the parallel code.
5110  if (n_workers > 1) {
5111    // Make refs discovery MT-safe, if it isn't already: it may not
5112    // necessarily be so, since it's possible that we are doing
5113    // ST marking.
5114    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5115    workers->run_task(&tsk);
5116  } else {
5117    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5118    tsk.work(0);
5119  }
5120
5121  // restore, single-threaded for now, any preserved marks
5122  // as a result of work_q overflow
5123  restore_preserved_marks_if_any();
5124}
5125
5126// Non-parallel version of remark
5127void CMSCollector::do_remark_non_parallel() {
5128  ResourceMark rm;
5129  HandleMark   hm;
5130  GenCollectedHeap* gch = GenCollectedHeap::heap();
5131  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5132
5133  MarkRefsIntoAndScanClosure
5134    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5135             &_markStack, this,
5136             false /* should_yield */, false /* not precleaning */);
5137  MarkFromDirtyCardsClosure
5138    markFromDirtyCardsClosure(this, _span,
5139                              NULL,  // space is set further below
5140                              &_markBitMap, &_markStack, &mrias_cl);
5141  {
5142    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5143    // Iterate over the dirty cards, setting the corresponding bits in the
5144    // mod union table.
5145    {
5146      ModUnionClosure modUnionClosure(&_modUnionTable);
5147      _ct->ct_bs()->dirty_card_iterate(
5148                      _cmsGen->used_region(),
5149                      &modUnionClosure);
5150    }
5151    // Having transferred these marks into the modUnionTable, we just need
5152    // to rescan the marked objects on the dirty cards in the modUnionTable.
5153    // The initial marking may have been done during an asynchronous
5154    // collection so there may be dirty bits in the mod-union table.
5155    const int alignment =
5156      CardTableModRefBS::card_size * BitsPerWord;
5157    {
5158      // ... First handle dirty cards in CMS gen
5159      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5160      MemRegion ur = _cmsGen->used_region();
5161      HeapWord* lb = ur.start();
5162      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5163      MemRegion cms_span(lb, ub);
5164      _modUnionTable.dirty_range_iterate_clear(cms_span,
5165                                               &markFromDirtyCardsClosure);
5166      verify_work_stacks_empty();
5167      if (PrintCMSStatistics != 0) {
5168        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5169          markFromDirtyCardsClosure.num_dirty_cards());
5170      }
5171    }
5172  }
5173  if (VerifyDuringGC &&
5174      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5175    HandleMark hm;  // Discard invalid handles created during verification
5176    Universe::verify();
5177  }
5178  {
5179    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5180
5181    verify_work_stacks_empty();
5182
5183    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5184    StrongRootsScope srs(1);
5185
5186    gch->gen_process_roots(&srs,
5187                           _cmsGen->level(),
5188                           true,  // younger gens as roots
5189                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
5190                           should_unload_classes(),
5191                           &mrias_cl,
5192                           NULL,
5193                           NULL); // The dirty klasses will be handled below
5194
5195    assert(should_unload_classes()
5196           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5197           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5198  }
5199
5200  {
5201    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5202
5203    verify_work_stacks_empty();
5204
5205    // Scan all class loader data objects that might have been introduced
5206    // during concurrent marking.
5207    ResourceMark rm;
5208    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5209    for (int i = 0; i < array->length(); i++) {
5210      mrias_cl.do_class_loader_data(array->at(i));
5211    }
5212
5213    // We don't need to keep track of new CLDs anymore.
5214    ClassLoaderDataGraph::remember_new_clds(false);
5215
5216    verify_work_stacks_empty();
5217  }
5218
5219  {
5220    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5221
5222    verify_work_stacks_empty();
5223
5224    RemarkKlassClosure remark_klass_closure(&mrias_cl);
5225    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5226
5227    verify_work_stacks_empty();
5228  }
5229
5230  // We might have added oops to ClassLoaderData::_handles during the
5231  // concurrent marking phase. These oops point to newly allocated objects
5232  // that are guaranteed to be kept alive, either by the direct allocation
5233  // code or when the young collector processes the roots. Hence,
5234  // we don't have to revisit the _handles block during the remark phase.
5235
5236  verify_work_stacks_empty();
5237  // Restore evacuated mark words, if any, used for overflow list links
5238  if (!CMSOverflowEarlyRestoration) {
5239    restore_preserved_marks_if_any();
5240  }
5241  verify_overflow_empty();
5242}
5243
5244////////////////////////////////////////////////////////
5245// Parallel Reference Processing Task Proxy Class
5246////////////////////////////////////////////////////////
5247class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5248  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5249  CMSCollector*          _collector;
5250  CMSBitMap*             _mark_bit_map;
5251  const MemRegion        _span;
5252  ProcessTask&           _task;
5253
5254public:
5255  CMSRefProcTaskProxy(ProcessTask&     task,
5256                      CMSCollector*    collector,
5257                      const MemRegion& span,
5258                      CMSBitMap*       mark_bit_map,
5259                      AbstractWorkGang* workers,
5260                      OopTaskQueueSet* task_queues):
5261    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5262      task_queues,
5263      workers->active_workers()),
5264    _task(task),
5265    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5266  {
5267    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5268           "Inconsistency in _span");
5269  }
5270
5271  OopTaskQueueSet* task_queues() { return queues(); }
5272
5273  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5274
5275  void do_work_steal(int i,
5276                     CMSParDrainMarkingStackClosure* drain,
5277                     CMSParKeepAliveClosure* keep_alive,
5278                     int* seed);
5279
5280  virtual void work(uint worker_id);
5281};
5282
5283void CMSRefProcTaskProxy::work(uint worker_id) {
5284  ResourceMark rm;
5285  HandleMark hm;
5286  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5287  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5288                                        _mark_bit_map,
5289                                        work_queue(worker_id));
5290  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5291                                                 _mark_bit_map,
5292                                                 work_queue(worker_id));
5293  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5294  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5295  if (_task.marks_oops_alive()) {
5296    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5297                  _collector->hash_seed(worker_id));
5298  }
5299  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5300  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5301}
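
// Editorial summary of the per-worker protocol above (no new behavior):
//   1. _task.work(id, ...) processes this worker's share of the
//      discovered Reference lists, using the keep-alive closure to mark
//      referents that must survive and the drain closure to blacken them;
//   2. if the task marks oops alive, do_work_steal() (below) keeps the
//      worker busy on the global overflow list and on oops stolen from
//      sibling queues until all workers agree to terminate.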
5302
5303class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5304  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5305  EnqueueTask& _task;
5306
5307public:
5308  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5309    : AbstractGangTask("Enqueue reference objects in parallel"),
5310      _task(task)
5311  { }
5312
5313  virtual void work(uint worker_id)
5314  {
5315    _task.work(worker_id);
5316  }
5317};
5318
5319CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5320  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5321   _span(span),
5322   _bit_map(bit_map),
5323   _work_queue(work_queue),
5324   _mark_and_push(collector, span, bit_map, work_queue),
5325   _low_water_mark(MIN2((work_queue->max_elems()/4),
5326                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5327{ }
5328
5329// . see if we can share work_queues with ParNew? XXX
5330void CMSRefProcTaskProxy::do_work_steal(int i,
5331  CMSParDrainMarkingStackClosure* drain,
5332  CMSParKeepAliveClosure* keep_alive,
5333  int* seed) {
5334  OopTaskQueue* work_q = work_queue(i);
5335  NOT_PRODUCT(int num_steals = 0;)
5336  oop obj_to_scan;
5337
5338  while (true) {
5339    // Completely finish any leftover work from earlier rounds
5340    drain->trim_queue(0);
5341    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5342                                         (size_t)ParGCDesiredObjsFromOverflowList);
5343    // Now check if there's any work in the overflow list
5344    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5345    // only affects the number of attempts made to get work from the
5346    // overflow list and does not affect the number of workers.  Just
5347    // pass ParallelGCThreads so this behavior is unchanged.
5348    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5349                                                work_q,
5350                                                ParallelGCThreads)) {
5351      // Found something in global overflow list;
5352      // not yet ready to go stealing work from others.
5353      // We'd like to assert(work_q->size() != 0, ...)
5354      // because we just took work from the overflow list,
5355      // but of course we can't, since all of that might have
5356      // been already stolen from us.
5357      continue;
5358    }
5359    // Verify that we have no work before we resort to stealing
5360    assert(work_q->size() == 0, "Have work, shouldn't steal");
5361    // Try to steal from other queues that have work
5362    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5363      NOT_PRODUCT(num_steals++;)
5364      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5365      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5366      // Do scanning work
5367      obj_to_scan->oop_iterate(keep_alive);
5368      // Loop around, finish this work, and try to steal some more
5369    } else if (terminator()->offer_termination()) {
5370      break;  // nirvana from the infinite cycle
5371    }
5372  }
5373  NOT_PRODUCT(
5374    if (PrintCMSStatistics != 0) {
5375      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5376    }
5377  )
5378}
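
// Illustrative numbers for the refill computation above: with a queue
// capacity (max_elems) of 16384, a current size of 384, and
// ParGCDesiredObjsFromOverflowList at its default of 20, the worker asks
// for MIN2((16384 - 384)/4, 20) == 20 oops per refill, so a refill can
// never consume more than a quarter of the queue's remaining headroom.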
5379
5380void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5381{
5382  GenCollectedHeap* gch = GenCollectedHeap::heap();
5383  FlexibleWorkGang* workers = gch->workers();
5384  assert(workers != NULL, "Need parallel worker threads.");
5385  CMSRefProcTaskProxy rp_task(task, &_collector,
5386                              _collector.ref_processor()->span(),
5387                              _collector.markBitMap(),
5388                              workers, _collector.task_queues());
5389  workers->run_task(&rp_task);
5390}
5391
5392void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5393{
5395  GenCollectedHeap* gch = GenCollectedHeap::heap();
5396  FlexibleWorkGang* workers = gch->workers();
5397  assert(workers != NULL, "Need parallel worker threads.");
5398  CMSRefEnqueueTaskProxy enq_task(task);
5399  workers->run_task(&enq_task);
5400}
5401
5402void CMSCollector::refProcessingWork() {
5403  ResourceMark rm;
5404  HandleMark   hm;
5405
5406  ReferenceProcessor* rp = ref_processor();
5407  assert(rp->span().equals(_span), "Spans should be equal");
5408  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5409  // Process weak references.
5410  rp->setup_policy(false);
5411  verify_work_stacks_empty();
5412
5413  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5414                                          &_markStack, false /* !preclean */);
5415  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5416                                _span, &_markBitMap, &_markStack,
5417                                &cmsKeepAliveClosure, false /* !preclean */);
5418  {
5419    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5420
5421    ReferenceProcessorStats stats;
5422    if (rp->processing_is_mt()) {
5423      // Set the degree of MT here.  If the discovery is done MT, there
5424      // may have been a different number of threads doing the discovery
5425      // and a different number of discovered lists may have Ref objects.
5426      // That is OK as long as the Reference lists are balanced (see
5427      // balance_all_queues() and balance_queues()).
5428      GenCollectedHeap* gch = GenCollectedHeap::heap();
5429      uint active_workers = ParallelGCThreads;
5430      FlexibleWorkGang* workers = gch->workers();
5431      if (workers != NULL) {
5432        active_workers = workers->active_workers();
5433        // The expectation is that active_workers will have already
5434        // been set to a reasonable value.  If it has not been set,
5435        // investigate.
5436        assert(active_workers > 0, "Should have been set during scavenge");
5437      }
5438      rp->set_active_mt_degree(active_workers);
5439      CMSRefProcTaskExecutor task_executor(*this);
5440      stats = rp->process_discovered_references(&_is_alive_closure,
5441                                        &cmsKeepAliveClosure,
5442                                        &cmsDrainMarkingStackClosure,
5443                                        &task_executor,
5444                                        _gc_timer_cm,
5445                                        _gc_tracer_cm->gc_id());
5446    } else {
5447      stats = rp->process_discovered_references(&_is_alive_closure,
5448                                        &cmsKeepAliveClosure,
5449                                        &cmsDrainMarkingStackClosure,
5450                                        NULL,
5451                                        _gc_timer_cm,
5452                                        _gc_tracer_cm->gc_id());
5453    }
5454    _gc_tracer_cm->report_gc_reference_stats(stats);
5455
5456  }
5457
5458  // This is the point where the entire marking should have completed.
5459  verify_work_stacks_empty();
5460
5461  if (should_unload_classes()) {
5462    {
5463      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5464
5465      // Unload classes and purge the SystemDictionary.
5466      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5467
5468      // Unload nmethods.
5469      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5470
5471      // Prune dead klasses from subklass/sibling/implementor lists.
5472      Klass::clean_weak_klass_links(&_is_alive_closure);
5473    }
5474
5475    {
5476      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5477      // Clean up unreferenced symbols in symbol table.
5478      SymbolTable::unlink();
5479    }
5480
5481    {
5482      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5483      // Delete entries for dead interned strings.
5484      StringTable::unlink(&_is_alive_closure);
5485    }
5486  }
5487
5488
5489  // Restore any preserved marks as a result of mark stack or
5490  // work queue overflow
5491  restore_preserved_marks_if_any();  // done single-threaded for now
5492
5493  rp->set_enqueuing_is_done(true);
5494  if (rp->processing_is_mt()) {
5495    rp->balance_all_queues();
5496    CMSRefProcTaskExecutor task_executor(*this);
5497    rp->enqueue_discovered_references(&task_executor);
5498  } else {
5499    rp->enqueue_discovered_references(NULL);
5500  }
5501  rp->verify_no_references_recorded();
5502  assert(!rp->discovery_enabled(), "should have been disabled");
5503}
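
// Ordering note (editorial): everything above depends on marking being
// complete. _is_alive_closure consults the final mark bit map, so weak
// reference processing, class unloading, and the symbol/string table
// scrubs must run after the remark pause and before the sweep frees any
// unmarked blocks.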
5504
5505#ifndef PRODUCT
5506void CMSCollector::check_correct_thread_executing() {
5507  Thread* t = Thread::current();
5508  // Only the VM thread or the CMS thread should be here.
5509  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5510         "Unexpected thread type");
5511  // If this is the VM thread, the foreground collector
5512  // should not be waiting.  Note that _foregroundGCShouldWait is
5513  // true while the foreground collector is waiting.
5514  if (_foregroundGCShouldWait) {
5515    // We cannot be the VM thread
5516    assert(t->is_ConcurrentGC_thread(),
5517           "Should be CMS thread");
5518  } else {
5519    // We can be the CMS thread only if we are in a stop-world
5520    // phase of CMS collection.
5521    if (t->is_ConcurrentGC_thread()) {
5522      assert(_collectorState == InitialMarking ||
5523             _collectorState == FinalMarking,
5524             "Should be a stop-world phase");
5525      // The CMS thread should be holding the CMS_token.
5526      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5527             "Potential interference with concurrently "
5528             "executing VM thread");
5529    }
5530  }
5531}
5532#endif
5533
5534void CMSCollector::sweep() {
5535  assert(_collectorState == Sweeping, "just checking");
5536  check_correct_thread_executing();
5537  verify_work_stacks_empty();
5538  verify_overflow_empty();
5539  increment_sweep_count();
5540  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5541
5542  _inter_sweep_timer.stop();
5543  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5544
5545  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5546  _intra_sweep_timer.reset();
5547  _intra_sweep_timer.start();
5548  {
5549    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5550    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5551    // First sweep the old gen
5552    {
5553      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5554                               bitMapLock());
5555      sweepWork(_cmsGen);
5556    }
5557
5558    // Update Universe::_heap_*_at_gc figures.
5559    // We need all the free list locks to make the abstract state
5560    // transition from Sweeping to Resizing. See detailed note
5561    // further below.
5562    {
5563      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5564      // Update heap occupancy information which is used as
5565      // input to soft ref clearing policy at the next gc.
5566      Universe::update_heap_info_at_gc();
5567      _collectorState = Resizing;
5568    }
5569  }
5570  verify_work_stacks_empty();
5571  verify_overflow_empty();
5572
5573  if (should_unload_classes()) {
5574    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5575    // requires that the virtual spaces are stable and not deleted.
5576    ClassLoaderDataGraph::set_should_purge(true);
5577  }
5578
5579  _intra_sweep_timer.stop();
5580  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5581
5582  _inter_sweep_timer.reset();
5583  _inter_sweep_timer.start();
5584
5585  // We need to use a monotonically non-decreasing time in ms,
5586  // or we will see time-warp warnings; os::javaTimeMillis()
5587  // does not guarantee monotonicity.
5588  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5589  update_time_of_last_gc(now);
5590
5591  // NOTE on abstract state transitions:
5592  // Mutators allocate-live and/or mark the mod-union table dirty
5593  // based on the state of the collection.  The former is done in
5594  // the interval [Marking, Sweeping] and the latter in the interval
5595  // [Marking, Sweeping).  Thus the transitions into the Marking state
5596  // and out of the Sweeping state must be synchronously visible
5597  // globally to the mutators.
5598  // The transition into the Marking state happens with the world
5599  // stopped so the mutators will globally see it.  Sweeping is
5600  // done asynchronously by the background collector so the transition
5601  // from the Sweeping state to the Resizing state must be done
5602  // under the freelistLock (as is the check for whether to
5603  // allocate-live and whether to dirty the mod-union table).
5604  assert(_collectorState == Resizing, "Change of collector state to"
5605    " Resizing must be done under the freelistLocks (plural)");
5606
5607  // Now that sweeping has been completed, we clear
5608  // the incremental_collection_failed flag,
5609  // thus inviting a younger gen collection to promote into
5610  // this generation. If such a promotion may still fail,
5611  // the flag will be set again when a young collection is
5612  // attempted.
5613  GenCollectedHeap* gch = GenCollectedHeap::heap();
5614  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5615  gch->update_full_collections_completed(_collection_count_start);
5616}
5617
5618// FIX ME!!! Looks like this belongs in CFLSpace, with
5619// CMSGen merely delegating to it.
5620void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5621  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5622  HeapWord*  minAddr        = _cmsSpace->bottom();
5623  HeapWord*  largestAddr    =
5624    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5625  if (largestAddr == NULL) {
5626    // The dictionary appears to be empty.  In this case
5627    // try to coalesce at the end of the heap.
5628    largestAddr = _cmsSpace->end();
5629  }
5630  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5631  size_t nearLargestOffset =
5632    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5633  if (PrintFLSStatistics != 0) {
5634    gclog_or_tty->print_cr(
5635      "CMS: Large Block: " PTR_FORMAT ";"
5636      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5637      p2i(largestAddr),
5638      p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5639  }
5640  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5641}
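
// Worked example for the computation above (illustrative numbers,
// assuming the default FLSLargestBlockCoalesceProximity of 0.99): if
// bottom() is 0x10000000 and the largest dictionary block starts at
// 0x18000000, largestOffset is 0x8000000 words, and sweeping begins its
// aggressive coalescing at bottom() + 0.99 * 0x8000000 - MinChunkSize,
// i.e. just in front of the largest free block.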
5642
5643bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5644  return addr >= _cmsSpace->nearLargestChunk();
5645}
5646
5647FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5648  return _cmsSpace->find_chunk_at_end();
5649}
5650
5651void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5652                                                    bool full) {
5653  // The next lower level has been collected.  Gather any statistics
5654  // that are of interest at this point.
5655  if (!full && (current_level + 1) == level()) {
5656    // Gather statistics on the young generation collection.
5657    collector()->stats().record_gc0_end(used());
5658  }
5659}
5660
5661void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5662  // We iterate over the space(s) underlying this generation,
5663  // checking the mark bit map to see if the bits corresponding
5664  // to specific blocks are marked or not. Blocks that are
5665  // marked are live and are not swept up. All remaining blocks
5666  // are swept up, with coalescing on-the-fly as we sweep up
5667  // contiguous free and/or garbage blocks:
5668  // We need to ensure that the sweeper synchronizes with allocators
5669  // and stop-the-world collectors. In particular, the following
5670  // locks are used:
5671  // . CMS token: if this is held, a stop the world collection cannot occur
5672  // . freelistLock: if this is held no allocation can occur from this
5673  //                 generation by another thread
5674  // . bitMapLock: if this is held, no other thread can access or update
5675  //               the marking bit map
5676
5677  // Note that we need to hold the freelistLock if we use
5678  // block iterate below; else the iterator might go awry if
5679  // a mutator (or promotion) causes block contents to change
5680  // (for instance if the allocator divvies up a block).
5681  // If we hold the free list lock, for all practical purposes
5682  // young generation GC's can't occur (they'll usually need to
5683  // promote), so we might as well prevent all young generation
5684  // GC's while we do a sweeping step. For the same reason, we might
5685  // as well take the bit map lock for the entire duration of the sweep.
5686
5687  // check that we hold the requisite locks
5688  assert(have_cms_token(), "Should hold cms token");
5689  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5690  assert_lock_strong(gen->freelistLock());
5691  assert_lock_strong(bitMapLock());
5692
5693  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5694  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5695  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5696                                      _inter_sweep_estimate.padded_average(),
5697                                      _intra_sweep_estimate.padded_average());
5698  gen->setNearLargestChunk();
5699
5700  {
5701    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
5702    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5703    // We need to free-up/coalesce garbage/blocks from a
5704    // co-terminal free run. This is done in the SweepClosure
5705    // destructor; so, do not remove this scope, else the
5706    // end-of-sweep-census below will be off by a little bit.
5707  }
5708  gen->cmsSpace()->sweep_completed();
5709  gen->cmsSpace()->endSweepFLCensus(sweep_count());
5710  if (should_unload_classes()) {                // unloaded classes this cycle,
5711    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5712  } else {                                      // did not unload classes,
5713    _concurrent_cycles_since_last_unload++;     // ... increment count
5714  }
5715}
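
// Editorial note: the inter/intra sweep timings sampled around this call
// feed exponentially padded averages; beginSweepFLCensus() uses them to
// project per-size-class demand, so the coalescing decisions made while
// sweeping reflect recent allocation behavior rather than a single
// sample.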
5716
5717// Reset CMS data structures (for now just the marking bit map)
5718// preparatory for the next cycle.
5719void CMSCollector::reset(bool concurrent) {
5720  if (concurrent) {
5721    CMSTokenSyncWithLocks ts(true, bitMapLock());
5722
5723    // If the state is not "Resetting", the foreground thread
5724    // has already done the collection and the resetting.
5725    if (_collectorState != Resetting) {
5726      assert(_collectorState == Idling, "The state should only change"
5727        " because the foreground collector has finished the collection");
5728      return;
5729    }
5730
5731    // Clear the mark bitmap (no grey objects to start with)
5732    // for the next cycle.
5733    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5734    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5735
5736    HeapWord* curAddr = _markBitMap.startWord();
5737    while (curAddr < _markBitMap.endWord()) {
5738      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5739      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5740      _markBitMap.clear_large_range(chunk);
5741      if (ConcurrentMarkSweepThread::should_yield() &&
5742          !foregroundGCIsActive() &&
5743          CMSYield) {
5744        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5745               "CMS thread should hold CMS token");
5746        assert_lock_strong(bitMapLock());
5747        bitMapLock()->unlock();
5748        ConcurrentMarkSweepThread::desynchronize(true);
5749        stopTimer();
5750        if (PrintCMSStatistics != 0) {
5751          incrementYields();
5752        }
5753
5754        // See the comment in coordinator_yield()
5755        for (unsigned i = 0; i < CMSYieldSleepCount &&
5756                         ConcurrentMarkSweepThread::should_yield() &&
5757                         !CMSCollector::foregroundGCIsActive(); ++i) {
5758          os::sleep(Thread::current(), 1, false);
5759        }
5760
5761        ConcurrentMarkSweepThread::synchronize(true);
5762        bitMapLock()->lock_without_safepoint_check();
5763        startTimer();
5764      }
5765      curAddr = chunk.end();
5766    }
5767    // A successful mostly concurrent collection has been done.
5768    // Because only the full (i.e., concurrent mode failure) collections
5769    // are being measured for gc overhead limits, clean the "near" flag
5770    // and count.
5771    size_policy()->reset_gc_overhead_limit_count();
5772    _collectorState = Idling;
5773  } else {
5774    // already have the lock
5775    assert(_collectorState == Resetting, "just checking");
5776    assert_lock_strong(bitMapLock());
5777    _markBitMap.clear_all();
5778    _collectorState = Idling;
5779  }
5780
5781  register_gc_end();
5782}
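
// Illustrative sizing note for the concurrent path above: with the
// default CMSBitMapYieldQuantum of 10M, each clear_large_range() call
// covers at most 10M heap words, so the bitmap for a multi-gigabyte old
// generation is cleared in dozens of bounded chunks, with a yield check
// between chunks instead of one long non-yielding pass.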
5783
5784void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5785  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5786  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5787  TraceCollectorStats tcs(counters());
5788
5789  switch (op) {
5790    case CMS_op_checkpointRootsInitial: {
5791      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5792      checkpointRootsInitial();
5793      if (PrintGC) {
5794        _cmsGen->printOccupancy("initial-mark");
5795      }
5796      break;
5797    }
5798    case CMS_op_checkpointRootsFinal: {
5799      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5800      checkpointRootsFinal();
5801      if (PrintGC) {
5802        _cmsGen->printOccupancy("remark");
5803      }
5804      break;
5805    }
5806    default:
5807      fatal("No such CMS_op");
5808  }
5809}
5810
5811#ifndef PRODUCT
5812size_t const CMSCollector::skip_header_HeapWords() {
5813  return FreeChunk::header_size();
5814}
5815
5816// Try and collect here conditions that should hold when
5817// CMS thread is exiting. The idea is that the foreground GC
5818// thread should not be blocked if it wants to terminate
5819// the CMS thread and yet continue to run the VM for a while
5820// after that.
5821void CMSCollector::verify_ok_to_terminate() const {
5822  assert(Thread::current()->is_ConcurrentGC_thread(),
5823         "should be called by CMS thread");
5824  assert(!_foregroundGCShouldWait, "should be false");
5825  // We could check here that all the various low-level locks
5826  // are not held by the CMS thread, but that is overkill; see
5827  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5828  // is checked.
5829}
5830#endif
5831
5832size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5833  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5834         "missing Printezis mark?");
5835  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5836  size_t size = pointer_delta(nextOneAddr + 1, addr);
5837  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5838         "alignment problem");
5839  assert(size >= 3, "Necessary for Printezis marks to work");
5840  return size;
5841}
5842
5843// A variant of the above (block_size_using_printezis_bits()) except
5844// that we return 0 if the P-bits are not yet set.
5845size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5846  if (_markBitMap.isMarked(addr + 1)) {
5847    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5848    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5849    size_t size = pointer_delta(nextOneAddr + 1, addr);
5850    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5851           "alignment problem");
5852    assert(size >= 3, "Necessary for Printezis marks to work");
5853    return size;
5854  }
5855  return 0;
5856}
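
// Printezis marks, concretely (editorial note): for a block that was
// allocated but not yet initialized when marking encountered it, CMS
// sets the bits at addr and addr+1 (the "P-bits") and a third bit at
// addr+size-1. A scanner that later sees both addr and addr+1 marked can
// recover the size as
//   pointer_delta(getNextMarkedWordAddress(addr + 2) + 1, addr)
// which is why both routines above assert size >= 3: the scheme needs
// three distinct bit positions per block.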
5857
5858HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5859  size_t sz = 0;
5860  oop p = (oop)addr;
5861  if (p->klass_or_null() != NULL) {
5862    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5863  } else {
5864    sz = block_size_using_printezis_bits(addr);
5865  }
5866  assert(sz > 0, "size must be nonzero");
5867  HeapWord* next_block = addr + sz;
5868  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5869                                             CardTableModRefBS::card_size);
5870  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5871         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5872         "must be different cards");
5873  return next_card;
5874}
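
// Worked example (illustrative): with 512-byte cards (64 heap words on a
// 64-bit VM), a 5-word block starting 20 words past a card boundary has
// next_block 25 words past that boundary, so next_card rounds up to the
// boundary 64 words in; the assert then confirms addr and next_card
// really lie on different cards.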
5875
5876
5877// CMS Bit Map Wrapper /////////////////////////////////////////
5878
5879// Construct a CMS bit map infrastructure, but don't create the
5880// bit vector itself. That is done by a separate call CMSBitMap::allocate()
5881// further below.
5882CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5883  _bm(),
5884  _shifter(shifter),
5885  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5886                                    Monitor::_safepoint_check_sometimes) : NULL)
5887{
5888  _bmStartWord = 0;
5889  _bmWordSize  = 0;
5890}
5891
5892bool CMSBitMap::allocate(MemRegion mr) {
5893  _bmStartWord = mr.start();
5894  _bmWordSize  = mr.word_size();
5895  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5896                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5897  if (!brs.is_reserved()) {
5898    warning("CMS bit map allocation failure");
5899    return false;
5900  }
5901  // For now we'll just commit all of the bit map up front.
5902  // Later on we'll try to be more parsimonious with swap.
5903  if (!_virtual_space.initialize(brs, brs.size())) {
5904    warning("CMS bit map backing store failure");
5905    return false;
5906  }
5907  assert(_virtual_space.committed_size() == brs.size(),
5908         "didn't reserve backing store for all of CMS bit map?");
5909  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5910  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5911         _bmWordSize, "inconsistency in bit map sizing");
5912  _bm.set_size(_bmWordSize >> _shifter);
5913
5914  // bm.clear(); // can we rely on getting zero'd memory? verify below
5915  assert(isAllClear(),
5916         "Expected zero'd memory from ReservedSpace constructor");
5917  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5918         "consistency check");
5919  return true;
5920}
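
// Sizing arithmetic above, worked through (illustrative numbers): with
// _shifter == 0 (one bit per heap word) and a 1 GB old generation on a
// 64-bit VM, _bmWordSize is 128M words, so the reservation is
// (128M >> LogBitsPerByte) + 1 == 16M + 1 bytes; each bit map byte
// covers 8 heap words, i.e. 64 bytes of heap.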
5921
5922void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5923  HeapWord *next_addr, *end_addr, *last_addr;
5924  assert_locked();
5925  assert(covers(mr), "out-of-range error");
5926  // XXX assert that start and end are appropriately aligned
5927  for (next_addr = mr.start(), end_addr = mr.end();
5928       next_addr < end_addr; next_addr = last_addr) {
5929    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5930    last_addr = dirty_region.end();
5931    if (!dirty_region.is_empty()) {
5932      cl->do_MemRegion(dirty_region);
5933    } else {
5934      assert(last_addr == end_addr, "program logic");
5935      return;
5936    }
5937  }
5938}
5939
5940void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5941  _bm.print_on_error(st, prefix);
5942}
5943
5944#ifndef PRODUCT
5945void CMSBitMap::assert_locked() const {
5946  CMSLockVerifier::assert_locked(lock());
5947}
5948
5949bool CMSBitMap::covers(MemRegion mr) const {
5950  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5951  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5952         "size inconsistency");
5953  return (mr.start() >= _bmStartWord) &&
5954         (mr.end()   <= endWord());
5955}
5956
5957bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5958  return (start >= _bmStartWord && (start + size) <= endWord());
5959}
5960
5961void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5962  // verify that there are no 1 bits in the interval [left, right)
5963  FalseBitMapClosure falseBitMapClosure;
5964  iterate(&falseBitMapClosure, left, right);
5965}
5966
5967void CMSBitMap::region_invariant(MemRegion mr)
5968{
5969  assert_locked();
5970  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5971  assert(!mr.is_empty(), "unexpected empty region");
5972  assert(covers(mr), "mr should be covered by bit map");
5973  // convert address range into offset range
5974  size_t start_ofs = heapWordToOffset(mr.start());
5975  // Make sure that end() is appropriately aligned
5976  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5977                        (1 << (_shifter+LogHeapWordSize))),
5978         "Misaligned mr.end()");
5979  size_t end_ofs   = heapWordToOffset(mr.end());
5980  assert(end_ofs > start_ofs, "Should mark at least one bit");
5981}
5982
5983#endif
5984
5985bool CMSMarkStack::allocate(size_t size) {
5986  // allocate a stack of the requisite depth
5987  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5988                   size * sizeof(oop)));
5989  if (!rs.is_reserved()) {
5990    warning("CMSMarkStack allocation failure");
5991    return false;
5992  }
5993  if (!_virtual_space.initialize(rs, rs.size())) {
5994    warning("CMSMarkStack backing store failure");
5995    return false;
5996  }
5997  assert(_virtual_space.committed_size() == rs.size(),
5998         "didn't reserve backing store for all of CMS stack?");
5999  _base = (oop*)(_virtual_space.low());
6000  _index = 0;
6001  _capacity = size;
6002  NOT_PRODUCT(_max_depth = 0);
6003  return true;
6004}
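
// Footprint note (illustrative): the backing store is size * sizeof(oop)
// bytes, so a 4M-entry stack on a 64-bit VM reserves 32 MB up front;
// expand() below can grow this, by doubling, up to MarkStackSizeMax.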
6005
6006// XXX FIX ME !!! In the MT case we come in here holding a
6007// leaf lock. For printing we need to take a further lock
6008// which has lower rank. We need to recalibrate the two
6009// lock-ranks involved in order to be able to print the
6010// messages below. (Or defer the printing to the caller.
6011// For now we take the expedient path of just disabling the
6012// messages for the problematic case.)
6013void CMSMarkStack::expand() {
6014  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6015  if (_capacity == MarkStackSizeMax) {
6016    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6017      // We print a warning message only once per CMS cycle.
6018      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6019    }
6020    return;
6021  }
6022  // Double capacity if possible
6023  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6024  // Do not give up existing stack until we have managed to
6025  // get the double capacity that we desired.
6026  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6027                   new_capacity * sizeof(oop)));
6028  if (rs.is_reserved()) {
6029    // Release the backing store associated with old stack
6030    _virtual_space.release();
6031    // Reinitialize virtual space for new stack
6032    if (!_virtual_space.initialize(rs, rs.size())) {
6033      fatal("Not enough swap for expanded marking stack");
6034    }
6035    _base = (oop*)(_virtual_space.low());
6036    _index = 0;
6037    _capacity = new_capacity;
6038  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6039    // Failed to double the capacity; continue, and
6040    // print a detail message only once per CMS cycle.
6041    gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6042            SIZE_FORMAT "K",
6043            _capacity / K, new_capacity / K);
6044  }
6045}
6046
6047
6048// Closures
6049// XXX: there seems to be a lot of code duplication here;
6050// should refactor and consolidate common code.
6051
6052// This closure is used to mark refs into the CMS generation in
6053// the CMS bit map. Called at the first checkpoint. This closure
6054// assumes that we do not need to re-mark dirty cards; if the CMS
6055// generation on which this is used is not the oldest
6056// generation, then this will lose younger_gen cards!
6057
6058MarkRefsIntoClosure::MarkRefsIntoClosure(
6059  MemRegion span, CMSBitMap* bitMap):
6060    _span(span),
6061    _bitMap(bitMap)
6062{
6063  assert(_ref_processor == NULL, "deliberately left NULL");
6064  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6065}
6066
6067void MarkRefsIntoClosure::do_oop(oop obj) {
6068  // if p points into _span, then mark corresponding bit in _markBitMap
6069  assert(obj->is_oop(), "expected an oop");
6070  HeapWord* addr = (HeapWord*)obj;
6071  if (_span.contains(addr)) {
6072    // this should be made more efficient
6073    _bitMap->mark(addr);
6074  }
6075}
6076
6077void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6078void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6079
6080Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6081  MemRegion span, CMSBitMap* bitMap):
6082    _span(span),
6083    _bitMap(bitMap)
6084{
6085  assert(_ref_processor == NULL, "deliberately left NULL");
6086  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6087}
6088
6089void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6090  // if p points into _span, then mark corresponding bit in _markBitMap
6091  assert(obj->is_oop(), "expected an oop");
6092  HeapWord* addr = (HeapWord*)obj;
6093  if (_span.contains(addr)) {
6094    // this should be made more efficient
6095    _bitMap->par_mark(addr);
6096  }
6097}
6098
6099void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6100void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6101
6102// A variant of the above, used for CMS marking verification.
6103MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6104  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6105    _span(span),
6106    _verification_bm(verification_bm),
6107    _cms_bm(cms_bm)
6108{
6109  assert(_ref_processor == NULL, "deliberately left NULL");
6110  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6111}
6112
6113void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6114  // if p points into _span, then mark corresponding bit in _markBitMap
6115  assert(obj->is_oop(), "expected an oop");
6116  HeapWord* addr = (HeapWord*)obj;
6117  if (_span.contains(addr)) {
6118    _verification_bm->mark(addr);
6119    if (!_cms_bm->isMarked(addr)) {
6120      oop(addr)->print();
6121      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6122      fatal("... aborting");
6123    }
6124  }
6125}
6126
6127void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6128void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6129
6130//////////////////////////////////////////////////
6131// MarkRefsIntoAndScanClosure
6132//////////////////////////////////////////////////
6133
6134MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6135                                                       ReferenceProcessor* rp,
6136                                                       CMSBitMap* bit_map,
6137                                                       CMSBitMap* mod_union_table,
6138                                                       CMSMarkStack*  mark_stack,
6139                                                       CMSCollector* collector,
6140                                                       bool should_yield,
6141                                                       bool concurrent_precleaning):
6142  _collector(collector),
6143  _span(span),
6144  _bit_map(bit_map),
6145  _mark_stack(mark_stack),
6146  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6147                      mark_stack, concurrent_precleaning),
6148  _yield(should_yield),
6149  _concurrent_precleaning(concurrent_precleaning),
6150  _freelistLock(NULL)
6151{
6152  _ref_processor = rp;
6153  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6154}
6155
6156// This closure is used to mark refs into the CMS generation at the
6157// second (final) checkpoint, and to scan and transitively follow
6158// the unmarked oops. It is also used during the concurrent precleaning
6159// phase while scanning objects on dirty cards in the CMS generation.
6160// The marks are made in the marking bit map and the marking stack is
6161// used for keeping the (newly) grey objects during the scan.
6162// The parallel version (Par_...) appears further below.
6163void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6164  if (obj != NULL) {
6165    assert(obj->is_oop(), "expected an oop");
6166    HeapWord* addr = (HeapWord*)obj;
6167    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6168    assert(_collector->overflow_list_is_empty(),
6169           "overflow list should be empty");
6170    if (_span.contains(addr) &&
6171        !_bit_map->isMarked(addr)) {
6172      // mark bit map (object is now grey)
6173      _bit_map->mark(addr);
6174      // push on marking stack (stack should be empty), and drain the
6175      // stack by applying this closure to the oops in the oops popped
6176      // from the stack (i.e. blacken the grey objects)
6177      bool res = _mark_stack->push(obj);
6178      assert(res, "Should have space to push on empty stack");
6179      do {
6180        oop new_oop = _mark_stack->pop();
6181        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6182        assert(_bit_map->isMarked((HeapWord*)new_oop),
6183               "only grey objects on this stack");
6184        // iterate over the oops in this oop, marking and pushing
6185        // the ones in CMS heap (i.e. in _span).
6186        new_oop->oop_iterate(&_pushAndMarkClosure);
6187        // check if it's time to yield
6188        do_yield_check();
6189      } while (!_mark_stack->isEmpty() ||
6190               (!_concurrent_precleaning && take_from_overflow_list()));
6191        // if marking stack is empty, and we are not doing this
6192        // during precleaning, then check the overflow list
6193    }
6194    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6195    assert(_collector->overflow_list_is_empty(),
6196           "overflow list was drained above");
6197    // We could restore evacuated mark words, if any, used for
6198    // overflow list links here because the overflow list is
6199    // provably empty here. That would reduce the maximum
6200    // size requirements for preserved_{oop,mark}_stack.
6201    // But we'll just postpone it until we are all done
6202    // so we can just stream through.
6203    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6204      _collector->restore_preserved_marks_if_any();
6205      assert(_collector->no_preserved_marks(), "No preserved marks");
6206    }
6207    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6208           "All preserved marks should have been restored above");
6209  }
6210}
6211
6212void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6213void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
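
// Tri-color view of do_oop() above (editorial): marking the bit makes an
// object grey; popping it and applying _pushAndMarkClosure to its fields
// blackens it, while any newly marked field targets become grey on the
// stack. The stack therefore holds exactly the grey objects, which is why
// it must be empty on entry and again on exit.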
6214
6215void MarkRefsIntoAndScanClosure::do_yield_work() {
6216  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6217         "CMS thread should hold CMS token");
6218  assert_lock_strong(_freelistLock);
6219  assert_lock_strong(_bit_map->lock());
6220  // relinquish the free_list_lock and bitMaplock()
6221  _bit_map->lock()->unlock();
6222  _freelistLock->unlock();
6223  ConcurrentMarkSweepThread::desynchronize(true);
6224  _collector->stopTimer();
6225  if (PrintCMSStatistics != 0) {
6226    _collector->incrementYields();
6227  }
6228
6229  // See the comment in coordinator_yield()
6230  for (unsigned i = 0;
6231       i < CMSYieldSleepCount &&
6232       ConcurrentMarkSweepThread::should_yield() &&
6233       !CMSCollector::foregroundGCIsActive();
6234       ++i) {
6235    os::sleep(Thread::current(), 1, false);
6236  }
6237
6238  ConcurrentMarkSweepThread::synchronize(true);
6239  _freelistLock->lock_without_safepoint_check();
6240  _bit_map->lock()->lock_without_safepoint_check();
6241  _collector->startTimer();
6242}
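
// Note on the lock shuffle above (editorial): the locks are re-taken in
// the same order in which they are acquired elsewhere -- freelistLock
// first, then the bit map lock -- so yielding cannot invert the
// established lock ordering.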
6243
6244///////////////////////////////////////////////////////////
6245// Par_MarkRefsIntoAndScanClosure: a parallel version of
6246//                                 MarkRefsIntoAndScanClosure
6247///////////////////////////////////////////////////////////
6248Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6249  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6250  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6251  _span(span),
6252  _bit_map(bit_map),
6253  _work_queue(work_queue),
6254  _low_water_mark(MIN2((work_queue->max_elems()/4),
6255                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6256  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6257{
6258  _ref_processor = rp;
6259  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6260}
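
// Illustrative numbers for _low_water_mark above: with a queue capacity
// of 16384, CMSWorkQueueDrainThreshold at its default of 10, and
// ParallelGCThreads == 8, the mark is MIN2(16384/4, 10*8) == 80, so
// trim_queue() drains a worker's queue only down to 80 entries, leaving
// work available for other threads to steal.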
6261
6262// This closure is used to mark refs into the CMS generation at the
6263// second (final) checkpoint, and to scan and transitively follow
6264// the unmarked oops. The marks are made in the marking bit map and
6265// the work_queue is used for keeping the (newly) grey objects during
6266// the scan phase whence they are also available for stealing by parallel
6267// threads. Since the marking bit map is shared, updates are
6268// synchronized (via CAS).
6269void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6270  if (obj != NULL) {
6271    // Ignore mark word because this could be an already marked oop
6272    // that may be chained at the end of the overflow list.
6273    assert(obj->is_oop(true), "expected an oop");
6274    HeapWord* addr = (HeapWord*)obj;
6275    if (_span.contains(addr) &&
6276        !_bit_map->isMarked(addr)) {
6277      // mark bit map (object will become grey):
6278      // It is possible for several threads to be
6279      // trying to "claim" this object concurrently;
6280      // the unique thread that succeeds in marking the
6281      // object first will do the subsequent push on
6282      // to the work queue (or overflow list).
6283      if (_bit_map->par_mark(addr)) {
6284        // push on work_queue (which may not be empty), and trim the
6285        // queue to an appropriate length by applying this closure to
6286        // the oops in the oops popped from the stack (i.e. blacken the
6287        // grey objects)
6288        bool res = _work_queue->push(obj);
6289        assert(res, "Low water mark should be less than capacity?");
6290        trim_queue(_low_water_mark);
6291      } // Else, another thread claimed the object
6292    }
6293  }
6294}
6295
6296void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6297void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6298
6299// This closure is used to rescan the marked objects on the dirty cards
6300// in the mod union table and the card table proper.
6301size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6302  oop p, MemRegion mr) {
6303
6304  size_t size = 0;
6305  HeapWord* addr = (HeapWord*)p;
6306  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6307  assert(_span.contains(addr), "we are scanning the CMS generation");
6308  // check if it's time to yield
6309  if (do_yield_check()) {
6310    // We yielded for some foreground stop-world work,
6311    // and we have been asked to abort this ongoing preclean cycle.
6312    return 0;
6313  }
6314  if (_bitMap->isMarked(addr)) {
6315    // it's marked; is it potentially uninitialized?
6316    if (p->klass_or_null() != NULL) {
6317        // an initialized object; ignore mark word in verification below
6318        // since we are running concurrent with mutators
6319        assert(p->is_oop(true), "should be an oop");
6320        if (p->is_objArray()) {
6321          // objArrays are precisely marked; restrict scanning
6322          // to dirty cards only.
6323          size = CompactibleFreeListSpace::adjustObjectSize(
6324                   p->oop_iterate(_scanningClosure, mr));
6325        } else {
6326          // A non-array may have been imprecisely marked; we need
6327          // to scan object in its entirety.
6328          size = CompactibleFreeListSpace::adjustObjectSize(
6329                   p->oop_iterate(_scanningClosure));
6330        }
6331        #ifdef ASSERT
6332          size_t direct_size =
6333            CompactibleFreeListSpace::adjustObjectSize(p->size());
6334          assert(size == direct_size, "Inconsistency in size");
6335          assert(size >= 3, "Necessary for Printezis marks to work");
6336          if (!_bitMap->isMarked(addr+1)) {
6337            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6338          } else {
6339            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6340            assert(_bitMap->isMarked(addr+size-1),
6341                   "inconsistent Printezis mark");
6342          }
6343        #endif // ASSERT
6344    } else {
6345      // An uninitialized object.
6346      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6347      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6348      size = pointer_delta(nextOneAddr + 1, addr);
6349      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6350             "alignment problem");
6351      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6352      // will dirty the card when the klass pointer is installed in the
6353      // object (signaling the completion of initialization).
6354    }
6355  } else {
6356    // Either a not yet marked object or an uninitialized object
6357    if (p->klass_or_null() == NULL) {
6358      // An uninitialized object, skip to the next card, since
6359      // we may not be able to read its P-bits yet.
6360      assert(size == 0, "Initial value");
6361    } else {
6362      // An object not (yet) reached by marking: we merely need to
6363      // compute its size so as to go look at the next block.
6364      assert(p->is_oop(true), "should be an oop");
6365      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6366    }
6367  }
6368  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6369  return size;
6370}
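
// Summary of the klass_or_null() cases above (editorial): a NULL klass
// means a mutator is still initializing the block. If the block is
// marked we can still size it from its P-bits; if it is unmarked we
// return 0 and skip to the next card, relying on the mutator's eventual
// OopDesc::set_klass() to dirty the card and bring us back here.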
6371
6372void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6373  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6374         "CMS thread should hold CMS token");
6375  assert_lock_strong(_freelistLock);
6376  assert_lock_strong(_bitMap->lock());
6377  // relinquish the free_list_lock and bitMaplock()
6378  _bitMap->lock()->unlock();
6379  _freelistLock->unlock();
6380  ConcurrentMarkSweepThread::desynchronize(true);
6381  _collector->stopTimer();
6382  if (PrintCMSStatistics != 0) {
6383    _collector->incrementYields();
6384  }
6385
6386  // See the comment in coordinator_yield()
6387  for (unsigned i = 0; i < CMSYieldSleepCount &&
6388                   ConcurrentMarkSweepThread::should_yield() &&
6389                   !CMSCollector::foregroundGCIsActive(); ++i) {
6390    os::sleep(Thread::current(), 1, false);
6391  }
6392
6393  ConcurrentMarkSweepThread::synchronize(true);
6394  _freelistLock->lock_without_safepoint_check();
6395  _bitMap->lock()->lock_without_safepoint_check();
6396  _collector->startTimer();
6397}
6398
6399
6400//////////////////////////////////////////////////////////////////
6401// SurvivorSpacePrecleanClosure
6402//////////////////////////////////////////////////////////////////
6403// This (single-threaded) closure is used to preclean the oops in
6404// the survivor spaces.
6405size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6406
6407  HeapWord* addr = (HeapWord*)p;
6408  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6409  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6410  assert(p->klass_or_null() != NULL, "object should be initialized");
6411  // an initialized object; ignore mark word in verification below
6412  // since we are running concurrent with mutators
6413  assert(p->is_oop(true), "should be an oop");
6414  // Note that we do not yield while we iterate over
6415  // the interior oops of p, pushing the relevant ones
6416  // on our marking stack.
6417  size_t size = p->oop_iterate(_scanning_closure);
6418  do_yield_check();
6419  // Observe that below, we do not abandon the preclean
6420  // phase as soon as we should; rather we empty the
6421  // marking stack before returning. This is to satisfy
6422  // some existing assertions. In general, it may be a
6423  // good idea to abort immediately and complete the marking
6424  // from the grey objects at a later time.
6425  while (!_mark_stack->isEmpty()) {
6426    oop new_oop = _mark_stack->pop();
6427    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6428    assert(_bit_map->isMarked((HeapWord*)new_oop),
6429           "only grey objects on this stack");
6430    // iterate over the oops in this oop, marking and pushing
6431    // the ones in CMS heap (i.e. in _span).
6432    new_oop->oop_iterate(_scanning_closure);
6433    // check if it's time to yield
6434    do_yield_check();
6435  }
6436  unsigned int after_count =
6437    GenCollectedHeap::heap()->total_collections();
6438  bool abort = (_before_count != after_count) ||
6439               _collector->should_abort_preclean();
6440  return abort ? 0 : size;
6441}
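
// Abort semantics (editorial): returning 0 tells the careful iteration
// driving this closure to stop; the survivor-space preclean is abandoned
// either because a young collection intervened (_before_count changed)
// or because the collector asked for the preclean to be cut short.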
6442
6443void SurvivorSpacePrecleanClosure::do_yield_work() {
6444  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6445         "CMS thread should hold CMS token");
6446  assert_lock_strong(_bit_map->lock());
6447  // Relinquish the bit map lock
6448  _bit_map->lock()->unlock();
6449  ConcurrentMarkSweepThread::desynchronize(true);
6450  _collector->stopTimer();
6451  if (PrintCMSStatistics != 0) {
6452    _collector->incrementYields();
6453  }
6454
6455  // See the comment in coordinator_yield()
6456  for (unsigned i = 0; i < CMSYieldSleepCount &&
6457                       ConcurrentMarkSweepThread::should_yield() &&
6458                       !CMSCollector::foregroundGCIsActive(); ++i) {
6459    os::sleep(Thread::current(), 1, false);
6460  }
6461
6462  ConcurrentMarkSweepThread::synchronize(true);
6463  _bit_map->lock()->lock_without_safepoint_check();
6464  _collector->startTimer();
6465}
6466
6467// This closure is used to rescan the marked objects on the dirty cards
6468// in the mod union table and the card table proper. In the parallel
6469// case, although the bitMap is shared, we do a single read so the
6470// isMarked() query is "safe".
6471bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6472  // Ignore mark word because we are running concurrent with mutators
6473  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6474  HeapWord* addr = (HeapWord*)p;
6475  assert(_span.contains(addr), "we are scanning the CMS generation");
6476  bool is_obj_array = false;
6477  #ifdef ASSERT
6478    if (!_parallel) {
6479      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6480      assert(_collector->overflow_list_is_empty(),
6481             "overflow list should be empty");
6483    }
6484  #endif // ASSERT
6485  if (_bit_map->isMarked(addr)) {
6486    // Obj arrays are precisely marked, non-arrays are not;
6487    // so we scan objArrays precisely and non-arrays in their
6488    // entirety.
6489    if (p->is_objArray()) {
6490      is_obj_array = true;
6491      if (_parallel) {
6492        p->oop_iterate(_par_scan_closure, mr);
6493      } else {
6494        p->oop_iterate(_scan_closure, mr);
6495      }
6496    } else {
6497      if (_parallel) {
6498        p->oop_iterate(_par_scan_closure);
6499      } else {
6500        p->oop_iterate(_scan_closure);
6501      }
6502    }
6503  }
6504  #ifdef ASSERT
6505    if (!_parallel) {
6506      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6507      assert(_collector->overflow_list_is_empty(),
6508             "overflow list should be empty");
6510    }
6511  #endif // ASSERT
6512  return is_obj_array;
6513}
6514
6515MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6516                        MemRegion span,
6517                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6518                        bool should_yield, bool verifying):
6519  _collector(collector),
6520  _span(span),
6521  _bitMap(bitMap),
6522  _mut(&collector->_modUnionTable),
6523  _markStack(markStack),
6524  _yield(should_yield),
6525  _skipBits(0)
6526{
6527  assert(_markStack->isEmpty(), "stack should be empty");
6528  _finger = _bitMap->startWord();
6529  _threshold = _finger;
6530  assert(_collector->_restart_addr == NULL, "Sanity check");
6531  assert(_span.contains(_finger), "Out of bounds _finger?");
6532  DEBUG_ONLY(_verifying = verifying;)
6533}
6534
6535void MarkFromRootsClosure::reset(HeapWord* addr) {
6536  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6537  assert(_span.contains(addr), "Out of bounds _finger?");
6538  _finger = addr;
6539  _threshold = (HeapWord*)round_to(
6540                 (intptr_t)_finger, CardTableModRefBS::card_size);
6541}
6542
6543// Should revisit to see if this should be restructured for
6544// greater efficiency.
6545bool MarkFromRootsClosure::do_bit(size_t offset) {
6546  if (_skipBits > 0) {
6547    _skipBits--;
6548    return true;
6549  }
6550  // convert offset into a HeapWord*
6551  HeapWord* addr = _bitMap->startWord() + offset;
6552  assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
6553         "address out of range");
6554  assert(_bitMap->isMarked(addr), "tautology");
6555  if (_bitMap->isMarked(addr+1)) {
6556    // this is an allocated but not yet initialized object
6557    assert(_skipBits == 0, "tautology");
6558    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6559    oop p = oop(addr);
6560    if (p->klass_or_null() == NULL) {
6561      DEBUG_ONLY(if (!_verifying) {)
6562        // We re-dirty the cards on which this object lies and increase
6563        // the _threshold so that we'll come back to scan this object
6564        // during the preclean or remark phase. (CMSCleanOnEnter)
6565        if (CMSCleanOnEnter) {
6566          size_t sz = _collector->block_size_using_printezis_bits(addr);
6567          HeapWord* end_card_addr   = (HeapWord*)round_to(
6568                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6569          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6570          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6571          // Bump _threshold to end_card_addr; note that
6572          // _threshold cannot possibly exceed end_card_addr, anyhow.
6573          // This prevents future clearing of the card as the scan proceeds
6574          // to the right.
6575          assert(_threshold <= end_card_addr,
6576                 "Because we are just scanning into this object");
6577          if (_threshold < end_card_addr) {
6578            _threshold = end_card_addr;
6579          }
6580          if (p->klass_or_null() != NULL) {
6581            // Redirty the range of cards...
6582            _mut->mark_range(redirty_range);
6583          } // ...else the setting of klass will dirty the card anyway.
6584        }
6585      DEBUG_ONLY(})
6586      return true;
6587    }
6588  }
6589  scanOopsInOop(addr);
6590  return true;
6591}
6592
6593// We take a break if we've been at this for a while,
6594// so as to avoid monopolizing the locks involved.
6595void MarkFromRootsClosure::do_yield_work() {
6596  // First give up the locks, then yield, then re-lock
6597  // We should probably use a constructor/destructor idiom to do this
6598  // unlock/lock, or modify the MutexUnlocker class to serve our
6599  // purpose. XXX (A sketch of such an idiom follows this function.)
6600  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6601         "CMS thread should hold CMS token");
6602  assert_lock_strong(_bitMap->lock());
6603  _bitMap->lock()->unlock();
6604  ConcurrentMarkSweepThread::desynchronize(true);
6605  _collector->stopTimer();
6606  if (PrintCMSStatistics != 0) {
6607    _collector->incrementYields();
6608  }
6609
6610  // See the comment in coordinator_yield()
6611  for (unsigned i = 0; i < CMSYieldSleepCount &&
6612                       ConcurrentMarkSweepThread::should_yield() &&
6613                       !CMSCollector::foregroundGCIsActive(); ++i) {
6614    os::sleep(Thread::current(), 1, false);
6615  }
6616
6617  ConcurrentMarkSweepThread::synchronize(true);
6618  _bitMap->lock()->lock_without_safepoint_check();
6619  _collector->startTimer();
6620}
6621
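// A sketch of the constructor/destructor idiom wished for above.
// Illustrative only: the class name is hypothetical and nothing here is
// used by the collector, but every call it makes (assert_lock_strong,
// desynchronize/synchronize, lock_without_safepoint_check) appears in
// do_yield_work() above.
class ReverseBitMapLockerSketch : public StackObj {
  CMSBitMap* _bm;
 public:
  ReverseBitMapLockerSketch(CMSBitMap* bm) : _bm(bm) {
    assert_lock_strong(_bm->lock());
    _bm->lock()->unlock();                           // give up the lock ...
    ConcurrentMarkSweepThread::desynchronize(true);  // ... and the CMS token
  }
  ~ReverseBitMapLockerSketch() {
    ConcurrentMarkSweepThread::synchronize(true);    // take the token back ...
    _bm->lock()->lock_without_safepoint_check();     // ... then the lock
  }
};
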
6622void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6623  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6624  assert(_markStack->isEmpty(),
6625         "should drain stack to limit stack usage");
6626  // convert ptr to an oop preparatory to scanning
6627  oop obj = oop(ptr);
6628  // Ignore mark word in verification below, since we
6629  // may be running concurrent with mutators.
6630  assert(obj->is_oop(true), "should be an oop");
6631  assert(_finger <= ptr, "_finger runneth ahead");
6632  // advance the finger to right end of this object
6633  _finger = ptr + obj->size();
6634  assert(_finger > ptr, "we just incremented it above");
6635  // On large heaps, it may take us some time to get through
6636  // the marking phase. During
6637  // this time it's possible that a lot of mutations have
6638  // accumulated in the card table and the mod union table --
6639  // these mutation records are redundant until we have
6640  // actually traced into the corresponding card.
6641  // Here, we check whether advancing the finger would make
6642  // us cross into a new card, and if so clear corresponding
6643  // cards in the MUT (preclean them in the card-table in the
6644  // future).
6645
6646  DEBUG_ONLY(if (!_verifying) {)
6647    // The clean-on-enter optimization is disabled by default,
6648    // until we fix 6178663.
6649    if (CMSCleanOnEnter && (_finger > _threshold)) {
6650      // [_threshold, _finger) represents the interval
6651      // of cards to be cleared in the MUT (or precleaned in the card
6652      // table). The set of cards to be cleared is all those that
6653      // overlap with the interval [_threshold, _finger); note that
6654      // _threshold is always kept card-aligned but _finger isn't
6655      // always card-aligned (see the alignment sketch after this function).
6656      HeapWord* old_threshold = _threshold;
6657      assert(old_threshold == (HeapWord*)round_to(
6658              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6659             "_threshold should always be card-aligned");
6660      _threshold = (HeapWord*)round_to(
6661                     (intptr_t)_finger, CardTableModRefBS::card_size);
6662      MemRegion mr(old_threshold, _threshold);
6663      assert(!mr.is_empty(), "Control point invariant");
6664      assert(_span.contains(mr), "Should clear within span");
6665      _mut->clear_range(mr);
6666    }
6667  DEBUG_ONLY(})
6668  // Note: the finger doesn't advance while we drain
6669  // the stack below.
6670  PushOrMarkClosure pushOrMarkClosure(_collector,
6671                                      _span, _bitMap, _markStack,
6672                                      _finger, this);
6673  bool res = _markStack->push(obj);
6674  assert(res, "Empty non-zero size stack should have space for single push");
6675  while (!_markStack->isEmpty()) {
6676    oop new_oop = _markStack->pop();
6677    // Skip verifying header mark word below because we are
6678    // running concurrent with mutators.
6679    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6680    // now scan this oop's oops
6681    new_oop->oop_iterate(&pushOrMarkClosure);
6682    do_yield_check();
6683  }
6684  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6685}
6686
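// A sketch of the card-alignment arithmetic used in scanOopsInOop() above.
// The helper name is hypothetical; round_to() and
// CardTableModRefBS::card_size are the real primitives. With the usual
// 512-byte cards, 0x10234 rounds up to the card boundary 0x10400, while an
// already card-aligned address such as 0x10200 is left unchanged.
static HeapWord* card_align_up_sketch(HeapWord* p) {
  return (HeapWord*)round_to((intptr_t)p, CardTableModRefBS::card_size);
}
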
6687Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6688                       CMSCollector* collector, MemRegion span,
6689                       CMSBitMap* bit_map,
6690                       OopTaskQueue* work_queue,
6691                       CMSMarkStack*  overflow_stack):
6692  _collector(collector),
6693  _whole_span(collector->_span),
6694  _span(span),
6695  _bit_map(bit_map),
6696  _mut(&collector->_modUnionTable),
6697  _work_queue(work_queue),
6698  _overflow_stack(overflow_stack),
6699  _skip_bits(0),
6700  _task(task)
6701{
6702  assert(_work_queue->size() == 0, "work_queue should be empty");
6703  _finger = span.start();
6704  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6705  assert(_span.contains(_finger), "Out of bounds _finger?");
6706}
6707
6708// Should revisit to see if this should be restructured for
6709// greater efficiency.
6710bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6711  if (_skip_bits > 0) {
6712    _skip_bits--;
6713    return true;
6714  }
6715  // convert offset into a HeapWord*
6716  HeapWord* addr = _bit_map->startWord() + offset;
6717  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6718         "address out of range");
6719  assert(_bit_map->isMarked(addr), "tautology");
6720  if (_bit_map->isMarked(addr+1)) {
6721    // this is an allocated object that might not yet be initialized
6722    assert(_skip_bits == 0, "tautology");
6723    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6724    oop p = oop(addr);
6725    if (p->klass_or_null() == NULL) {
6726      // in the case of Clean-on-Enter optimization, redirty card
6727      // In the case of the Clean-on-Enter optimization, redirty the card
6728      // and avoid clearing the card by increasing the threshold.
6729    }
6730  }
6731  scan_oops_in_oop(addr);
6732  return true;
6733}
6734
6735void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6736  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6737  // Should we assert that our work queue is empty or
6738  // below some drain limit?
6739  assert(_work_queue->size() == 0,
6740         "should drain stack to limit stack usage");
6741  // convert ptr to an oop preparatory to scanning
6742  oop obj = oop(ptr);
6743  // Ignore mark word in verification below, since we
6744  // may be running concurrent with mutators.
6745  assert(obj->is_oop(true), "should be an oop");
6746  assert(_finger <= ptr, "_finger runneth ahead");
6747  // advance the finger to right end of this object
6748  _finger = ptr + obj->size();
6749  assert(_finger > ptr, "we just incremented it above");
6750  // On large heaps, it may take us some time to get through
6751  // the marking phase. During
6752  // this time it's possible that a lot of mutations have
6753  // accumulated in the card table and the mod union table --
6754  // these mutation records are redundant until we have
6755  // actually traced into the corresponding card.
6756  // Here, we check whether advancing the finger would make
6757  // us cross into a new card, and if so clear corresponding
6758  // cards in the MUT (preclean them in the card-table in the
6759  // future).
6760
6761  // The clean-on-enter optimization is disabled by default,
6762  // until we fix 6178663.
6763  if (CMSCleanOnEnter && (_finger > _threshold)) {
6764    // [_threshold, _finger) represents the interval
6765    // of cards to be cleared in the MUT (or precleaned in the card table).
6766    // The set of cards to be cleared is all those that overlap
6767    // with the interval [_threshold, _finger); note that
6768    // _threshold is always kept card-aligned but _finger isn't
6769    // always card-aligned.
6770    HeapWord* old_threshold = _threshold;
6771    assert(old_threshold == (HeapWord*)round_to(
6772            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6773           "_threshold should always be card-aligned");
6774    _threshold = (HeapWord*)round_to(
6775                   (intptr_t)_finger, CardTableModRefBS::card_size);
6776    MemRegion mr(old_threshold, _threshold);
6777    assert(!mr.is_empty(), "Control point invariant");
6778    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6779    _mut->clear_range(mr);
6780  }
6781
6782  // Note: the local finger doesn't advance while we drain
6783  // the stack below, but the global finger sure can and will.
6784  HeapWord** gfa = _task->global_finger_addr();
6785  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6786                                      _span, _bit_map,
6787                                      _work_queue,
6788                                      _overflow_stack,
6789                                      _finger,
6790                                      gfa, this);
6791  bool res = _work_queue->push(obj);   // overflow could occur here
6792  assert(res, "Will hold once we use workqueues");
6793  while (true) {
6794    oop new_oop;
6795    if (!_work_queue->pop_local(new_oop)) {
6796      // We emptied our work_queue; check if there's stuff that can
6797      // be gotten from the overflow stack.
6798      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6799            _overflow_stack, _work_queue)) {
6800        do_yield_check();
6801        continue;
6802      } else {  // done
6803        break;
6804      }
6805    }
6806    // Skip verifying header mark word below because we are
6807    // running concurrent with mutators.
6808    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6809    // now scan this oop's oops
6810    new_oop->oop_iterate(&pushOrMarkClosure);
6811    do_yield_check();
6812  }
6813  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6814}
6815
6816// Yield in response to a request from VM Thread or
6817// from mutators.
6818void Par_MarkFromRootsClosure::do_yield_work() {
6819  assert(_task != NULL, "sanity");
6820  _task->yield();
6821}
6822
6823// A variant of the above used for verifying CMS marking work.
6824MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6825                        MemRegion span,
6826                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6827                        CMSMarkStack*  mark_stack):
6828  _collector(collector),
6829  _span(span),
6830  _verification_bm(verification_bm),
6831  _cms_bm(cms_bm),
6832  _mark_stack(mark_stack),
6833  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6834                      mark_stack)
6835{
6836  assert(_mark_stack->isEmpty(), "stack should be empty");
6837  _finger = _verification_bm->startWord();
6838  assert(_collector->_restart_addr == NULL, "Sanity check");
6839  assert(_span.contains(_finger), "Out of bounds _finger?");
6840}
6841
6842void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6843  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6844  assert(_span.contains(addr), "Out of bounds _finger?");
6845  _finger = addr;
6846}
6847
6848// Should revisit to see if this should be restructured for
6849// greater efficiency.
6850bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6851  // convert offset into a HeapWord*
6852  HeapWord* addr = _verification_bm->startWord() + offset;
6853  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6854         "address out of range");
6855  assert(_verification_bm->isMarked(addr), "tautology");
6856  assert(_cms_bm->isMarked(addr), "tautology");
6857
6858  assert(_mark_stack->isEmpty(),
6859         "should drain stack to limit stack usage");
6860  // convert addr to an oop preparatory to scanning
6861  oop obj = oop(addr);
6862  assert(obj->is_oop(), "should be an oop");
6863  assert(_finger <= addr, "_finger runneth ahead");
6864  // advance the finger to right end of this object
6865  _finger = addr + obj->size();
6866  assert(_finger > addr, "we just incremented it above");
6867  // Note: the finger doesn't advance while we drain
6868  // the stack below.
6869  bool res = _mark_stack->push(obj);
6870  assert(res, "Empty non-zero size stack should have space for single push");
6871  while (!_mark_stack->isEmpty()) {
6872    oop new_oop = _mark_stack->pop();
6873    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6874    // now scan this oop's oops
6875    new_oop->oop_iterate(&_pam_verify_closure);
6876  }
6877  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6878  return true;
6879}
6880
6881PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6882  CMSCollector* collector, MemRegion span,
6883  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6884  CMSMarkStack*  mark_stack):
6885  MetadataAwareOopClosure(collector->ref_processor()),
6886  _collector(collector),
6887  _span(span),
6888  _verification_bm(verification_bm),
6889  _cms_bm(cms_bm),
6890  _mark_stack(mark_stack)
6891{ }
6892
6893void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6894void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6895
6896// Upon stack overflow, we discard (part of) the stack,
6897// remembering the least address amongst those discarded
6898// in CMSCollector's _restart_addr.
6899void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6900  // Remember the least grey address discarded
6901  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6902  _collector->lower_restart_addr(ra);
6903  _mark_stack->reset();  // discard stack contents
6904  _mark_stack->expand(); // expand the stack if possible
6905}
6906
6907void PushAndMarkVerifyClosure::do_oop(oop obj) {
6908  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6909  HeapWord* addr = (HeapWord*)obj;
6910  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6911    // Oop lies in _span and isn't yet grey or black
6912    _verification_bm->mark(addr);            // now grey
6913    if (!_cms_bm->isMarked(addr)) {
6914      oop(addr)->print();
6915      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6916                             p2i(addr));
6917      fatal("... aborting");
6918    }
6919
6920    if (!_mark_stack->push(obj)) { // stack overflow
6921      if (PrintCMSStatistics != 0) {
6922        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6923                               SIZE_FORMAT, _mark_stack->capacity());
6924      }
6925      assert(_mark_stack->isFull(), "Else push should have succeeded");
6926      handle_stack_overflow(addr);
6927    }
6928    // anything including and to the right of _finger
6929    // will be scanned as we iterate over the remainder of the
6930    // bit map
6931  }
6932}
6933
6934PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6935                     MemRegion span,
6936                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6937                     HeapWord* finger, MarkFromRootsClosure* parent) :
6938  MetadataAwareOopClosure(collector->ref_processor()),
6939  _collector(collector),
6940  _span(span),
6941  _bitMap(bitMap),
6942  _markStack(markStack),
6943  _finger(finger),
6944  _parent(parent)
6945{ }
6946
6947Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
6948                     MemRegion span,
6949                     CMSBitMap* bit_map,
6950                     OopTaskQueue* work_queue,
6951                     CMSMarkStack*  overflow_stack,
6952                     HeapWord* finger,
6953                     HeapWord** global_finger_addr,
6954                     Par_MarkFromRootsClosure* parent) :
6955  MetadataAwareOopClosure(collector->ref_processor()),
6956  _collector(collector),
6957  _whole_span(collector->_span),
6958  _span(span),
6959  _bit_map(bit_map),
6960  _work_queue(work_queue),
6961  _overflow_stack(overflow_stack),
6962  _finger(finger),
6963  _global_finger_addr(global_finger_addr),
6964  _parent(parent)
6965{ }
6966
6967// Assumes thread-safe access by callers, who are
6968// responsible for mutual exclusion.
6969void CMSCollector::lower_restart_addr(HeapWord* low) {
6970  assert(_span.contains(low), "Out of bounds addr");
6971  if (_restart_addr == NULL) {
6972    _restart_addr = low;
6973  } else {
6974    _restart_addr = MIN2(_restart_addr, low);
6975  }
6976}
6977
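// A worked example with hypothetical addresses: starting from
// _restart_addr == NULL,
//   lower_restart_addr((HeapWord*)0x2000);  // _restart_addr -> 0x2000
//   lower_restart_addr((HeapWord*)0x1800);  // MIN2 keeps the lower: 0x1800
// so a later restart rescans from the least grey address discarded.
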
6978// Upon stack overflow, we discard (part of) the stack,
6979// remembering the least address amongst those discarded
6980// in CMSCollector's _restart_addr.
6981void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6982  // Remember the least grey address discarded
6983  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6984  _collector->lower_restart_addr(ra);
6985  _markStack->reset();  // discard stack contents
6986  _markStack->expand(); // expand the stack if possible
6987}
6988
6989// Upon stack overflow, we discard (part of) the stack,
6990// remembering the least address amongst those discarded
6991// in CMSCollector's _restart_addr.
6992void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6993  // We need to do this under a mutex to prevent other
6994  // workers from interfering with the work done below.
6995  MutexLockerEx ml(_overflow_stack->par_lock(),
6996                   Mutex::_no_safepoint_check_flag);
6997  // Remember the least grey address discarded
6998  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6999  _collector->lower_restart_addr(ra);
7000  _overflow_stack->reset();  // discard stack contents
7001  _overflow_stack->expand(); // expand the stack if possible
7002}
7003
7004void PushOrMarkClosure::do_oop(oop obj) {
7005  // Ignore mark word because we are running concurrent with mutators.
7006  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7007  HeapWord* addr = (HeapWord*)obj;
7008  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7009    // Oop lies in _span and isn't yet grey or black
7010    _bitMap->mark(addr);            // now grey
7011    if (addr < _finger) {
7012      // the bit map iteration has already either passed, or
7013      // sampled, this bit in the bit map; we'll need to
7014      // use the marking stack to scan this oop's oops.
7015      bool simulate_overflow = false;
7016      NOT_PRODUCT(
7017        if (CMSMarkStackOverflowALot &&
7018            _collector->simulate_overflow()) {
7019          // simulate a stack overflow
7020          simulate_overflow = true;
7021        }
7022      )
7023      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7024        if (PrintCMSStatistics != 0) {
7025          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7026                                 SIZE_FORMAT, _markStack->capacity());
7027        }
7028        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7029        handle_stack_overflow(addr);
7030      }
7031    }
7032    // anything including and to the right of _finger
7033    // will be scanned as we iterate over the remainder of the
7034    // bit map
7035    do_yield_check();
7036  }
7037}
7038
7039void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7040void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7041
7042void Par_PushOrMarkClosure::do_oop(oop obj) {
7043  // Ignore mark word because we are running concurrent with mutators.
7044  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7045  HeapWord* addr = (HeapWord*)obj;
7046  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7047    // Oop lies in _whole_span and isn't yet grey or black
7048    // We read the global_finger (volatile read) strictly after marking oop
7049    bool res = _bit_map->par_mark(addr);    // now grey
7050    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7051    // Should we push this marked oop on our stack?
7052    // -- if someone else marked it, nothing to do
7053    // -- if target oop is above global finger nothing to do
7054    // -- if target oop is in chunk and above local finger
7055    //      then nothing to do
7056    // -- else push on work queue
7057    if (   !res       // someone else marked it, they will deal with it
7058        || (addr >= *gfa)  // will be scanned in a later task
7059        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7060      return;
7061    }
7062    // the bit map iteration has already either passed, or
7063    // sampled, this bit in the bit map; we'll need to
7064    // use the marking stack to scan this oop's oops.
7065    bool simulate_overflow = false;
7066    NOT_PRODUCT(
7067      if (CMSMarkStackOverflowALot &&
7068          _collector->simulate_overflow()) {
7069        // simulate a stack overflow
7070        simulate_overflow = true;
7071      }
7072    )
7073    if (simulate_overflow ||
7074        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7075      // stack overflow
7076      if (PrintCMSStatistics != 0) {
7077        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7078                               SIZE_FORMAT, _overflow_stack->capacity());
7079      }
7080      // We cannot assert that the overflow stack is full because
7081      // it may have been emptied since.
7082      assert(simulate_overflow ||
7083             _work_queue->size() == _work_queue->max_elems(),
7084            "Else push should have succeeded");
7085      handle_stack_overflow(addr);
7086    }
7087    do_yield_check();
7088  }
7089}
7090
7091void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7092void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7093
7094PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7095                                       MemRegion span,
7096                                       ReferenceProcessor* rp,
7097                                       CMSBitMap* bit_map,
7098                                       CMSBitMap* mod_union_table,
7099                                       CMSMarkStack*  mark_stack,
7100                                       bool           concurrent_precleaning):
7101  MetadataAwareOopClosure(rp),
7102  _collector(collector),
7103  _span(span),
7104  _bit_map(bit_map),
7105  _mod_union_table(mod_union_table),
7106  _mark_stack(mark_stack),
7107  _concurrent_precleaning(concurrent_precleaning)
7108{
7109  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7110}
7111
7112// Grey object rescan during pre-cleaning and second checkpoint phases --
7113// the non-parallel version (the parallel version appears further below).
7114void PushAndMarkClosure::do_oop(oop obj) {
7115  // Ignore mark word verification. During concurrent precleaning,
7116  // the object's monitor may be locked. During the checkpoint
7117  // phases, the object may already have been reached by a different
7118  // path and may be at the end of the global overflow list (so
7119  // the mark word may be NULL).
7120  assert(obj->is_oop_or_null(true /* ignore mark word */),
7121         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7122  HeapWord* addr = (HeapWord*)obj;
7123  // Check if oop points into the CMS generation
7124  // and is not marked
7125  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7126    // a white object ...
7127    _bit_map->mark(addr);         // ... now grey
7128    // push on the marking stack (grey set)
7129    bool simulate_overflow = false;
7130    NOT_PRODUCT(
7131      if (CMSMarkStackOverflowALot &&
7132          _collector->simulate_overflow()) {
7133        // simulate a stack overflow
7134        simulate_overflow = true;
7135      }
7136    )
7137    if (simulate_overflow || !_mark_stack->push(obj)) {
7138      if (_concurrent_precleaning) {
7139         // During precleaning we can just dirty the appropriate card(s)
7140         // in the mod union table (thus ensuring that the object remains
7141         // in the grey set) and continue. In the case of object arrays
7142         // we need to dirty all of the cards that the object spans,
7143         // since the rescan of object arrays will be limited to the
7144         // dirty cards.
7145         // Note that no one can be interfering with us in this action
7146         // of dirtying the mod union table, so no locking or atomics
7147         // are required.
7148         if (obj->is_objArray()) {
7149           size_t sz = obj->size();
7150           HeapWord* end_card_addr = (HeapWord*)round_to(
7151                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7152           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7153           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7154           _mod_union_table->mark_range(redirty_range);
7155         } else {
7156           _mod_union_table->mark(addr);
7157         }
7158         _collector->_ser_pmc_preclean_ovflw++;
7159      } else {
7160         // During the remark phase, we need to remember this oop
7161         // in the overflow list.
7162         _collector->push_on_overflow_list(obj);
7163         _collector->_ser_pmc_remark_ovflw++;
7164      }
7165    }
7166  }
7167}
7168
7169Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7170                                               MemRegion span,
7171                                               ReferenceProcessor* rp,
7172                                               CMSBitMap* bit_map,
7173                                               OopTaskQueue* work_queue):
7174  MetadataAwareOopClosure(rp),
7175  _collector(collector),
7176  _span(span),
7177  _bit_map(bit_map),
7178  _work_queue(work_queue)
7179{
7180  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7181}
7182
7183void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7184void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7185
7186// Grey object rescan during second checkpoint phase --
7187// the parallel version.
7188void Par_PushAndMarkClosure::do_oop(oop obj) {
7189  // In the assert below, we ignore the mark word because
7190  // this oop may point to an already visited object that is
7191  // on the overflow stack (in which case the mark word has
7192  // been hijacked for chaining into the overflow stack --
7193  // if this is the last object in the overflow stack then
7194  // its mark word will be NULL). Because this object may
7195  // have been subsequently popped off the global overflow
7196  // stack, and the mark word possibly restored to the prototypical
7197  // value, by the time we get to examine this failing assert in
7198  // the debugger, is_oop_or_null(false) may have started
7199  // to hold.
7200  assert(obj->is_oop_or_null(true),
7201         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7202  HeapWord* addr = (HeapWord*)obj;
7203  // Check if oop points into the CMS generation
7204  // and is not marked
7205  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7206    // a white object ...
7207    // If we manage to "claim" the object, by being the
7208    // first thread to mark it, then we push it on our
7209    // marking stack
7210    if (_bit_map->par_mark(addr)) {     // ... now grey
7211      // push on work queue (grey set)
7212      bool simulate_overflow = false;
7213      NOT_PRODUCT(
7214        if (CMSMarkStackOverflowALot &&
7215            _collector->par_simulate_overflow()) {
7216          // simulate a stack overflow
7217          simulate_overflow = true;
7218        }
7219      )
7220      if (simulate_overflow || !_work_queue->push(obj)) {
7221        _collector->par_push_on_overflow_list(obj);
7222        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7223      }
7224    } // Else, some other thread got there first
7225  }
7226}
7227
7228void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7229void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7230
7231void CMSPrecleanRefsYieldClosure::do_yield_work() {
7232  Mutex* bml = _collector->bitMapLock();
7233  assert_lock_strong(bml);
7234  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7235         "CMS thread should hold CMS token");
7236
7237  bml->unlock();
7238  ConcurrentMarkSweepThread::desynchronize(true);
7239
7240  _collector->stopTimer();
7241  if (PrintCMSStatistics != 0) {
7242    _collector->incrementYields();
7243  }
7244
7245  // See the comment in coordinator_yield()
7246  for (unsigned i = 0; i < CMSYieldSleepCount &&
7247                       ConcurrentMarkSweepThread::should_yield() &&
7248                       !CMSCollector::foregroundGCIsActive(); ++i) {
7249    os::sleep(Thread::current(), 1, false);
7250  }
7251
7252  ConcurrentMarkSweepThread::synchronize(true);
7253  bml->lock();
7254
7255  _collector->startTimer();
7256}
7257
7258bool CMSPrecleanRefsYieldClosure::should_return() {
7259  if (ConcurrentMarkSweepThread::should_yield()) {
7260    do_yield_work();
7261  }
7262  return _collector->foregroundGCIsActive();
7263}
7264
7265void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7266  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7267         "mr should be aligned to start at a card boundary");
7268  // We'd like to assert:
7269  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7270  //        "mr should be a range of cards");
7271  // However, that would be too strong in one case -- the last
7272  // partition ends at _unallocated_block which, in general, can be
7273  // an arbitrary boundary, not necessarily card aligned.
7274  if (PrintCMSStatistics != 0) {
7275    _num_dirty_cards +=
7276         mr.word_size()/CardTableModRefBS::card_size_in_words;
7277  }
7278  _space->object_iterate_mem(mr, &_scan_cl);
7279}
7280
7281SweepClosure::SweepClosure(CMSCollector* collector,
7282                           ConcurrentMarkSweepGeneration* g,
7283                           CMSBitMap* bitMap, bool should_yield) :
7284  _collector(collector),
7285  _g(g),
7286  _sp(g->cmsSpace()),
7287  _limit(_sp->sweep_limit()),
7288  _freelistLock(_sp->freelistLock()),
7289  _bitMap(bitMap),
7290  _yield(should_yield),
7291  _inFreeRange(false),           // No free range at beginning of sweep
7292  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7293  _lastFreeRangeCoalesced(false),
7294  _freeFinger(g->used_region().start())
7295{
7296  NOT_PRODUCT(
7297    _numObjectsFreed = 0;
7298    _numWordsFreed   = 0;
7299    _numObjectsLive = 0;
7300    _numWordsLive = 0;
7301    _numObjectsAlreadyFree = 0;
7302    _numWordsAlreadyFree = 0;
7303    _last_fc = NULL;
7304
7305    _sp->initializeIndexedFreeListArrayReturnedBytes();
7306    _sp->dictionary()->initialize_dict_returned_bytes();
7307  )
7308  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7309         "sweep _limit out of bounds");
7310  if (CMSTraceSweeper) {
7311    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7312                        p2i(_limit));
7313  }
7314}
7315
7316void SweepClosure::print_on(outputStream* st) const {
7317  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7318                p2i(_sp->bottom()), p2i(_sp->end()));
7319  tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7320  tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7321  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7322  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7323                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7324}
7325
7326#ifndef PRODUCT
7327// Assertion checking only:  no useful work in product mode --
7328// however, if any of the flags below become product flags,
7329// you may need to review this code to see if it needs to be
7330// enabled in product mode.
7331SweepClosure::~SweepClosure() {
7332  assert_lock_strong(_freelistLock);
7333  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7334         "sweep _limit out of bounds");
7335  if (inFreeRange()) {
7336    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7337    print();
7338    ShouldNotReachHere();
7339  }
7340  if (Verbose && PrintGC) {
7341    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7342                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7343    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7344                           SIZE_FORMAT " bytes  "
7345      "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7346      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7347      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7348    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7349                        * sizeof(HeapWord);
7350    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7351
7352    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7353      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7354      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7355      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7356      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7357      gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7358        indexListReturnedBytes);
7359      gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7360        dict_returned_bytes);
7361    }
7362  }
7363  if (CMSTraceSweeper) {
7364    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7365                           p2i(_limit));
7366  }
7367}
7368#endif  // PRODUCT
7369
7370void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7371    bool freeRangeInFreeLists) {
7372  if (CMSTraceSweeper) {
7373    gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7374               p2i(freeFinger), freeRangeInFreeLists);
7375  }
7376  assert(!inFreeRange(), "Trampling existing free range");
7377  set_inFreeRange(true);
7378  set_lastFreeRangeCoalesced(false);
7379
7380  set_freeFinger(freeFinger);
7381  set_freeRangeInFreeLists(freeRangeInFreeLists);
7382  if (CMSTestInFreeList) {
7383    if (freeRangeInFreeLists) {
7384      FreeChunk* fc = (FreeChunk*) freeFinger;
7385      assert(fc->is_free(), "A chunk on the free list should be free.");
7386      assert(fc->size() > 0, "Free range should have a size");
7387      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7388    }
7389  }
7390}
7391
7392// Note that the sweeper runs concurrently with mutators. Thus,
7393// it is possible for direct allocation in this generation to happen
7394// in the middle of the sweep. Note that the sweeper also coalesces
7395// contiguous free blocks. Thus, unless the sweeper and the allocator
7396// synchronize appropriately, freshly allocated blocks may get swept up.
7397// This synchronization is accomplished by the sweeper locking the free
7398// lists while it is sweeping. Thus blocks that are determined to be
7399// free are indeed free. There is, however, one additional complication:
7400// blocks that have been allocated since the final checkpoint and
7401// mark will not have been marked and so would be treated as
7402// unreachable and swept up. To prevent this, the allocator marks
7403// the bit map when allocating during the sweep phase. This leads,
7404// however, to a further complication -- objects may have been allocated
7405// but not yet initialized -- in the sense that the header isn't yet
7406// installed. The sweeper cannot then determine the size of the block
7407// in order to skip over it. To deal with this case, we use a technique
7408// (due to Printezis) to encode such uninitialized block sizes in the
7409// bit map. Since the bit map uses one bit per HeapWord, but the
7410// CMS generation has a minimum object size of 3 HeapWords, it follows
7411// that "normal marks" won't be adjacent in the bit map (there will
7412// always be at least two 0 bits between successive 1 bits). We make use
7413// of these "unused" bits to represent uninitialized blocks -- the bit
7414// corresponding to the start of the uninitialized object and the next
7415// bit are both set. Finally, a 1 bit marks the end of the object that
7416// started with the two consecutive 1 bits, to indicate its potentially
7417// uninitialized state.
7418
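// A sketch (hypothetical helper, not used by the collector) of recovering
// a Printezis-encoded block size from the bit map alone; it mirrors the
// logic in SweepClosure::do_live_chunk() below, and the CMSBitMap calls
// and pointer_delta() are the real primitives.
static size_t printezis_block_size_sketch(CMSBitMap* bm, HeapWord* addr) {
  // Two consecutive set bits at addr and addr+1 flag a block whose
  // header is not yet installed ...
  assert(bm->isMarked(addr) && bm->isMarked(addr + 1), "not Printezis-marked");
  // ... and a third set bit marks the last word of the block.
  HeapWord* last_word = bm->getNextMarkedWordAddress(addr + 2);
  return pointer_delta(last_word + 1, addr);   // size in HeapWords
}
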
7419size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7420  FreeChunk* fc = (FreeChunk*)addr;
7421  size_t res;
7422
7423  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7424  // than "addr == _limit" because although _limit was a block boundary when
7425  // we started the sweep, it may no longer be one because heap expansion
7426  // may have caused us to coalesce the block ending at the address _limit
7427  // with a newly expanded chunk (this happens when _limit was set to the
7428  // previous _end of the space), so we may have stepped past _limit:
7429  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7430  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7431    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7432           "sweep _limit out of bounds");
7433    assert(addr < _sp->end(), "addr out of bounds");
7434    // Flush any free range we might be holding as a single
7435    // coalesced chunk to the appropriate free list.
7436    if (inFreeRange()) {
7437      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7438             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", p2i(freeFinger())));
7439      flush_cur_free_chunk(freeFinger(),
7440                           pointer_delta(addr, freeFinger()));
7441      if (CMSTraceSweeper) {
7442        gclog_or_tty->print("Sweep: last chunk: ");
7443        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7444                   "[coalesced:%d]\n",
7445                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7446                   lastFreeRangeCoalesced() ? 1 : 0);
7447      }
7448    }
7449
7450    // help the iterator loop finish
7451    return pointer_delta(_sp->end(), addr);
7452  }
7453
7454  assert(addr < _limit, "sweep invariant");
7455  // check if we should yield
7456  do_yield_check(addr);
7457  if (fc->is_free()) {
7458    // Chunk that is already free
7459    res = fc->size();
7460    do_already_free_chunk(fc);
7461    debug_only(_sp->verifyFreeLists());
7462    // If we flush the chunk at hand in lookahead_and_flush()
7463    // and it's coalesced with a preceding chunk, then the
7464    // process of "mangling" the payload of the coalesced block
7465    // will cause erasure of the size information from the
7466    // (erstwhile) header of all the coalesced blocks but the
7467    // first, so the first disjunct in the assert will not hold
7468    // in that specific case (in which case the second disjunct
7469    // will hold).
7470    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7471           "Otherwise the size info doesn't change at this step");
7472    NOT_PRODUCT(
7473      _numObjectsAlreadyFree++;
7474      _numWordsAlreadyFree += res;
7475    )
7476    NOT_PRODUCT(_last_fc = fc;)
7477  } else if (!_bitMap->isMarked(addr)) {
7478    // Chunk is fresh garbage
7479    res = do_garbage_chunk(fc);
7480    debug_only(_sp->verifyFreeLists());
7481    NOT_PRODUCT(
7482      _numObjectsFreed++;
7483      _numWordsFreed += res;
7484    )
7485  } else {
7486    // Chunk that is alive.
7487    res = do_live_chunk(fc);
7488    debug_only(_sp->verifyFreeLists());
7489    NOT_PRODUCT(
7490        _numObjectsLive++;
7491        _numWordsLive += res;
7492    )
7493  }
7494  return res;
7495}
7496
7497// For the smart allocation, record the following:
7498//  split deaths - a free chunk is removed from its free list because
7499//      it is being split into two or more chunks.
7500//  split birth - a free chunk is being added to its free list because
7501//      a larger free chunk has been split and resulted in this free chunk.
7502//  coal death - a free chunk is being removed from its free list because
7503//      it is being coalesced into a large free chunk.
7504//  coal birth - a free chunk is being added to its free list because
7505//      it was created when two or more free chunks were coalesced into
7506//      this free chunk.
7507//
7508// These statistics are used to determine the desired number of free
7509// chunks of a given size.  The desired number is chosen to be relative
7510// to the end of a CMS sweep.  The desired number at the end of a sweep
7511// is the
7512//      count-at-end-of-previous-sweep (an amount that was enough)
7513//              - count-at-beginning-of-current-sweep  (the excess)
7514//              + split-births  (gains in this size during interval)
7515//              - split-deaths  (demands on this size during interval)
7516// where the interval is from the end of one sweep to the end of the
7517// next.
7518//
7519// When sweeping the sweeper maintains an accumulated chunk which is
7520// the chunk that is made up of chunks that have been coalesced.  That
7521// will be termed the left-hand chunk.  A new chunk of garbage that
7522// is being considered for coalescing will be referred to as the
7523// right-hand chunk.
7524//
7525// When making a decision on whether to coalesce a right-hand chunk with
7526// the current left-hand chunk, the current count vs. the desired count
7527// of the left-hand chunk is considered.  Also if the right-hand chunk
7528// is near the large chunk at the end of the heap (see
7529// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7530// left-hand chunk is coalesced.
7531//
7532// When making a decision about whether to split a chunk, the desired count
7533// vs. the current count of the candidate to be split is also considered.
7534// If the candidate is underpopulated (currently fewer chunks than desired)
7535// a chunk of an overpopulated (currently more chunks than desired) size may
7536// be chosen.  The "hint" associated with a free list, if non-null, points
7537// to a free list which may be overpopulated.
7538//
7539
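// A worked example of the formula above. The counts are made up and the
// helper is hypothetical; only the arithmetic is the point. A negative
// result would simply mean no additional chunks of this size are desired.
static ssize_t desired_free_count_sketch() {
  const ssize_t count_at_end_of_previous_sweep = 40; // was enough last time
  const ssize_t count_at_start_of_this_sweep   = 55; // so 15 in excess now
  const ssize_t split_births                   = 30; // gains during interval
  const ssize_t split_deaths                   = 10; // demands during interval
  // 40 - 55 + 30 - 10 == 5 chunks of this size desired at sweep's end.
  return count_at_end_of_previous_sweep - count_at_start_of_this_sweep
         + split_births - split_deaths;
}
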
7540void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7541  const size_t size = fc->size();
7542  // Chunks that cannot be coalesced are not in the
7543  // free lists.
7544  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7545    assert(_sp->verify_chunk_in_free_list(fc),
7546      "free chunk should be in free lists");
7547  }
7548  // A chunk that is already free should not have been
7549  // marked in the bit map.
7550  HeapWord* const addr = (HeapWord*) fc;
7551  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7552  // Verify that the bit map has no bits marked between
7553  // addr and purported end of this block.
7554  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7555
7556  // Some chunks cannot be coalesced under any circumstances.
7557  // See the definition of cantCoalesce().
7558  if (!fc->cantCoalesce()) {
7559    // This chunk can potentially be coalesced.
7560    if (_sp->adaptive_freelists()) {
7561      // All the work is done in
7562      do_post_free_or_garbage_chunk(fc, size);
7563    } else {  // Not adaptive free lists
7564      // this is a free chunk that can potentially be coalesced by the sweeper;
7565      if (!inFreeRange()) {
7566        // if the next chunk is a free block that can't be coalesced
7567        // it doesn't make sense to remove this chunk from the free lists
7568        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7569        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7570        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7571            nextChunk->is_free()               &&     // ... which is free...
7572            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7573          // nothing to do
7574        } else {
7575          // Potentially the start of a new free range:
7576          // Don't eagerly remove it from the free lists.
7577          // No need to remove it if it will just be put
7578          // back again.  (Also from a pragmatic point of view
7579          // if it is a free block in a region that is beyond
7580          // any allocated blocks, an assertion will fail)
7581          // Remember the start of a free run.
7582          initialize_free_range(addr, true);
7583          // end - can coalesce with next chunk
7584        }
7585      } else {
7586        // the midst of a free range, we are coalescing
7587        print_free_block_coalesced(fc);
7588        if (CMSTraceSweeper) {
7589          gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7590        }
7591        // remove it from the free lists
7592        _sp->removeFreeChunkFromFreeLists(fc);
7593        set_lastFreeRangeCoalesced(true);
7594        // If the chunk is being coalesced and the current free range is
7595        // in the free lists, remove the current free range so that it
7596        // will be returned to the free lists in its entirety - all
7597        // the coalesced pieces included.
7598        if (freeRangeInFreeLists()) {
7599          FreeChunk* ffc = (FreeChunk*) freeFinger();
7600          assert(ffc->size() == pointer_delta(addr, freeFinger()),
7601            "Size of free range is inconsistent with chunk size.");
7602          if (CMSTestInFreeList) {
7603            assert(_sp->verify_chunk_in_free_list(ffc),
7604              "free range is not in free lists");
7605          }
7606          _sp->removeFreeChunkFromFreeLists(ffc);
7607          set_freeRangeInFreeLists(false);
7608        }
7609      }
7610    }
7611    // Note that if the chunk is not coalescable (the else arm
7612    // below), we unconditionally flush, without needing to do
7613    // a "lookahead," as we do below.
7614    if (inFreeRange()) lookahead_and_flush(fc, size);
7615  } else {
7616    // Code path common to both original and adaptive free lists.
7617
7618    // Can't coalesce with the previous block; this should be treated
7619    // as the end of a free run, if any.
7620    if (inFreeRange()) {
7621      // we kicked some butt; time to pick up the garbage
7622      assert(freeFinger() < addr, "freeFinger points too high");
7623      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7624    }
7625    // else, nothing to do, just continue
7626  }
7627}
7628
7629size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7630  // This is a chunk of garbage.  It is not in any free list.
7631  // Add it to a free list or let it possibly be coalesced into
7632  // a larger chunk.
7633  HeapWord* const addr = (HeapWord*) fc;
7634  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7635
7636  if (_sp->adaptive_freelists()) {
7637    // Verify that the bit map has no bits marked between
7638    // addr and purported end of just dead object.
7639    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7640
7641    do_post_free_or_garbage_chunk(fc, size);
7642  } else {
7643    if (!inFreeRange()) {
7644      // start of a new free range
7645      assert(size > 0, "A free range should have a size");
7646      initialize_free_range(addr, false);
7647    } else {
7648      // this will be swept up when we hit the end of the
7649      // free range
7650      if (CMSTraceSweeper) {
7651        gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7652      }
7653      // If the chunk is being coalesced and the current free range is
7654      // in the free lists, remove the current free range so that it
7655      // will be returned to the free lists in its entirety - all
7656      // the coalesced pieces included.
7657      if (freeRangeInFreeLists()) {
7658        FreeChunk* ffc = (FreeChunk*)freeFinger();
7659        assert(ffc->size() == pointer_delta(addr, freeFinger()),
7660          "Size of free range is inconsistent with chunk size.");
7661        if (CMSTestInFreeList) {
7662          assert(_sp->verify_chunk_in_free_list(ffc),
7663            "free range is not in free lists");
7664        }
7665        _sp->removeFreeChunkFromFreeLists(ffc);
7666        set_freeRangeInFreeLists(false);
7667      }
7668      set_lastFreeRangeCoalesced(true);
7669    }
7670    // this will be swept up when we hit the end of the free range
7671
7672    // Verify that the bit map has no bits marked between
7673    // addr and purported end of just dead object.
7674    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7675  }
7676  assert(_limit >= addr + size,
7677         "A fresh garbage chunk can't possibly straddle over _limit");
7678  if (inFreeRange()) lookahead_and_flush(fc, size);
7679  return size;
7680}
7681
7682size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7683  HeapWord* addr = (HeapWord*) fc;
7684  // The sweeper has just found a live object. Return any accumulated
7685  // left hand chunk to the free lists.
7686  if (inFreeRange()) {
7687    assert(freeFinger() < addr, "freeFinger points too high");
7688    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7689  }
7690
7691  // This object is live: we'd normally expect this to be
7692  // an oop, and like to assert the following:
7693  // assert(oop(addr)->is_oop(), "live block should be an oop");
7694  // However, as we commented above, this may be an object whose
7695  // header hasn't yet been initialized.
7696  size_t size;
7697  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7698  if (_bitMap->isMarked(addr + 1)) {
7699    // Determine the size from the bit map, rather than trying to
7700    // compute it from the object header.
7701    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7702    size = pointer_delta(nextOneAddr + 1, addr);
7703    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7704           "alignment problem");
7705
7706#ifdef ASSERT
7707      if (oop(addr)->klass_or_null() != NULL) {
7708        // Ignore mark word because we are running concurrent with mutators
7709        assert(oop(addr)->is_oop(true), "live block should be an oop");
7710        assert(size ==
7711               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7712               "P-mark and computed size do not agree");
7713      }
7714#endif
7715
7716  } else {
7717    // This should be an initialized object that's alive.
7718    assert(oop(addr)->klass_or_null() != NULL,
7719           "Should be an initialized object");
7720    // Ignore mark word because we are running concurrent with mutators
7721    assert(oop(addr)->is_oop(true), "live block should be an oop");
7722    // Verify that the bit map has no bits marked between
7723    // addr and purported end of this block.
7724    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7725    assert(size >= 3, "Necessary for Printezis marks to work");
7726    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7727    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7728  }
7729  return size;
7730}
7731
7732void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7733                                                 size_t chunkSize) {
7734  // do_post_free_or_garbage_chunk() should only be called in the case
7735  // of the adaptive free list allocator.
7736  const bool fcInFreeLists = fc->is_free();
7737  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7738  assert((HeapWord*)fc <= _limit, "sweep invariant");
7739  if (CMSTestInFreeList && fcInFreeLists) {
7740    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7741  }
7742
7743  if (CMSTraceSweeper) {
7744    gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7745  }
7746
7747  HeapWord* const fc_addr = (HeapWord*) fc;
7748
7749  bool coalesce;
7750  const size_t left  = pointer_delta(fc_addr, freeFinger());
7751  const size_t right = chunkSize;
7752  switch (FLSCoalescePolicy) {
7753    // the numeric value forms a coalescing aggressiveness metric
7754    case 0:  { // never coalesce
7755      coalesce = false;
7756      break;
7757    }
7758    case 1: { // coalesce if left & right chunks on overpopulated lists
7759      coalesce = _sp->coalOverPopulated(left) &&
7760                 _sp->coalOverPopulated(right);
7761      break;
7762    }
7763    case 2: { // coalesce if left chunk on overpopulated list (default)
7764      coalesce = _sp->coalOverPopulated(left);
7765      break;
7766    }
7767    case 3: { // coalesce if left OR right chunk on overpopulated list
7768      coalesce = _sp->coalOverPopulated(left) ||
7769                 _sp->coalOverPopulated(right);
7770      break;
7771    }
7772    case 4: { // always coalesce
7773      coalesce = true;
7774      break;
7775    }
7776    default:
7777     ShouldNotReachHere();
7778  }
7779
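  // Note: FLSCoalescePolicy is a command-line flag, so the arm taken above
  // can be selected at launch, e.g. (hypothetical command line):
  //
  //   java -XX:+UseConcMarkSweepGC -XX:FLSCoalescePolicy=4 ...
  //
  // Any value outside [0, 4] falls into the default arm and hits
  // ShouldNotReachHere().
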
7780  // Should the current free range be coalesced?
7781  // If the chunk is in a free range and either we decided to coalesce above
7782  // or the chunk is near the large block at the end of the heap
7783  // (isNearLargestChunk() returns true), then coalesce this chunk.
7784  const bool doCoalesce = inFreeRange()
7785                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7786  if (doCoalesce) {
7787    // Coalesce the current free range on the left with the new
7788    // chunk on the right.  If either is on a free list,
7789    // it must be removed from the list and stashed in the closure.
7790    if (freeRangeInFreeLists()) {
7791      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7792      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7793        "Size of free range is inconsistent with chunk size.");
7794      if (CMSTestInFreeList) {
7795        assert(_sp->verify_chunk_in_free_list(ffc),
7796          "Chunk is not in free lists");
7797      }
7798      _sp->coalDeath(ffc->size());
7799      _sp->removeFreeChunkFromFreeLists(ffc);
7800      set_freeRangeInFreeLists(false);
7801    }
7802    if (fcInFreeLists) {
7803      _sp->coalDeath(chunkSize);
7804      assert(fc->size() == chunkSize,
7805        "The chunk has the wrong size or is not in the free lists");
7806      _sp->removeFreeChunkFromFreeLists(fc);
7807    }
7808    set_lastFreeRangeCoalesced(true);
7809    print_free_block_coalesced(fc);
7810  } else {  // not in a free range and/or should not coalesce
7811    // Return the current free range and start a new one.
7812    if (inFreeRange()) {
7813      // In a free range but cannot coalesce with the right hand chunk.
7814      // Put the current free range into the free lists.
7815      flush_cur_free_chunk(freeFinger(),
7816                           pointer_delta(fc_addr, freeFinger()));
7817    }
7818    // Set up for new free range.  Pass along whether the right hand
7819    // chunk is in the free lists.
7820    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7821  }
7822}
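
// To illustrate the net effect of the logic above: sweeping over
//   [ free A ][ free B ][ live obj C ]
// with a policy that coalesces A and B proceeds roughly as follows
// (a minimal sketch; the helpers are the ones used in the code above):
//
//   initialize_free_range(A, A_in_free_lists); // start tracking at A
//   // At B, doCoalesce is true: A and B merge into one tracked range.
//   // Whichever of A/B is already on a free list is first removed,
//   // and its death is recorded for the coalescing statistics:
//   removeFreeChunkFromFreeLists(A);  coalDeath(size(A));
//   removeFreeChunkFromFreeLists(B);  coalDeath(size(B));
//   set_lastFreeRangeCoalesced(true);
//   // At C (a live object), the merged range is returned to the
//   // free lists as a single chunk, recording a birth:
//   flush_cur_free_chunk(A, size(A) + size(B));  // does coalBirth()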
7823
7824// Lookahead flush:
7825// If we are tracking a free range, and this is the last chunk that
7826// we'll look at because its end crosses past _limit, we'll preemptively
7827// flush it along with any free range we may be holding on to. Note that
7828// this can be the case only for an already free or freshly garbage
7829// chunk. If this block is an object, it can never straddle
7830// over _limit. The "straddling" occurs when _limit is set at
7831// the previous end of the space when this cycle started, and
7832// a subsequent heap expansion caused the previously co-terminal
7833// free block to be coalesced with the newly expanded portion,
7834// thus rendering _limit a non-block-boundary, making it dangerous
7835// for the sweeper to step over and examine.
7836void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7837  assert(inFreeRange(), "Should only be called if currently in a free range.");
7838  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7839  assert(_sp->used_region().contains(eob - 1),
7840         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7841                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7842                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7843                 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
7844  if (eob >= _limit) {
7845    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7846    if (CMSTraceSweeper) {
7847      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7848                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7849                             "[" PTR_FORMAT "," PTR_FORMAT ")",
7850                             p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7851    }
7852    // Return the storage we are tracking back into the free lists.
7853    if (CMSTraceSweeper) {
7854      gclog_or_tty->print_cr("Flushing ... ");
7855    }
7856    assert(freeFinger() < eob, "Error");
7857  flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7858  }
7859}
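
// To picture the straddling case described above (an illustrative
// diagram, not to scale):
//
//   _sp->bottom()         old end of space == _limit      new end
//        |                            |                      |
//        v                            v                      v
//   .....[ formerly co-terminal free block ][ expanded space ]
//        \_______________ one coalesced chunk _______________/
//
// After expansion, the chunk's end-of-block (eob) lies at or beyond
// _limit, so eob >= _limit triggers the preemptive flush here rather
// than letting the sweeper step across the non-block-boundary at
// _limit.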
7860
7861void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7862  assert(inFreeRange(), "Should only be called if currently in a free range.");
7863  assert(size > 0,
7864    "A zero sized chunk cannot be added to the free lists.");
7865  if (!freeRangeInFreeLists()) {
7866    if (CMSTestInFreeList) {
7867      FreeChunk* fc = (FreeChunk*) chunk;
7868      fc->set_size(size);
7869      assert(!_sp->verify_chunk_in_free_list(fc),
7870        "chunk should not be in free lists yet");
7871    }
7872    if (CMSTraceSweeper) {
7873      gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7874                    p2i(chunk), size);
7875    }
7876    // A new free range is going to be starting.  The current
7877    // free range has not been added to the free lists yet or
7878    // was removed, so add it back.
7879    // If the current free range was coalesced, then the death
7880    // of the free range was recorded.  Record a birth now.
7881    if (lastFreeRangeCoalesced()) {
7882      _sp->coalBirth(size);
7883    }
7884    _sp->addChunkAndRepairOffsetTable(chunk, size,
7885            lastFreeRangeCoalesced());
7886  } else if (CMSTraceSweeper) {
7887    gclog_or_tty->print_cr("Already in free list: nothing to flush");
7888  }
7889  set_inFreeRange(false);
7890  set_freeRangeInFreeLists(false);
7891}
7892
7893// We take a break if we've been at this for a while,
7894// so as to avoid monopolizing the locks involved.
7895void SweepClosure::do_yield_work(HeapWord* addr) {
7896  // Return current free chunk being used for coalescing (if any)
7897  // to the appropriate freelist.  After yielding, the next
7898  // free block encountered will start a coalescing range of
7899  // free blocks.  If the next free block is adjacent to the
7900  // chunk just flushed, they will need to wait for the next
7901  // sweep to be coalesced.
7902  if (inFreeRange()) {
7903    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7904  }
7905
7906  // First give up the locks, then yield, then re-lock.
7907  // We should probably use a constructor/destructor idiom to
7908  // do this unlock/lock or modify the MutexUnlocker class to
7909  // serve our purpose. XXX
7910  assert_lock_strong(_bitMap->lock());
7911  assert_lock_strong(_freelistLock);
7912  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7913         "CMS thread should hold CMS token");
7914  _bitMap->lock()->unlock();
7915  _freelistLock->unlock();
7916  ConcurrentMarkSweepThread::desynchronize(true);
7917  _collector->stopTimer();
7918  if (PrintCMSStatistics != 0) {
7919    _collector->incrementYields();
7920  }
7921
7922  // See the comment in coordinator_yield()
7923  for (unsigned i = 0; i < CMSYieldSleepCount &&
7924                       ConcurrentMarkSweepThread::should_yield() &&
7925                       !CMSCollector::foregroundGCIsActive(); ++i) {
7926    os::sleep(Thread::current(), 1, false);
7927  }
7928
7929  ConcurrentMarkSweepThread::synchronize(true);
7930  _freelistLock->lock();
7931  _bitMap->lock()->lock_without_safepoint_check();
7932  _collector->startTimer();
7933}
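
// The constructor/destructor idiom alludedted to in the XXX comment
// above might look roughly like the following (a hypothetical sketch
// only; no such helper class exists here):
//
//   class CMSSweepYielder : public StackObj {
//     Mutex* _bm_lock; Mutex* _fl_lock;
//    public:
//     CMSSweepYielder(Mutex* bm, Mutex* fl) : _bm_lock(bm), _fl_lock(fl) {
//       _bm_lock->unlock();                  // give up the locks ...
//       _fl_lock->unlock();
//       ConcurrentMarkSweepThread::desynchronize(true);
//     }
//     ~CMSSweepYielder() {                   // ... and re-take them,
//       ConcurrentMarkSweepThread::synchronize(true);
//       _fl_lock->lock();                    // in the reverse order
//       _bm_lock->lock_without_safepoint_check();
//     }
//   };
//
// The sleep loop would then run in the scope between construction
// and destruction of the yielder.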
7934
7935#ifndef PRODUCT
7936// This is actually very useful in a product build if it can
7937// be called from the debugger.  Compile it into the product
7938// as needed.
7939bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7940  return debug_cms_space->verify_chunk_in_free_list(fc);
7941}
7942#endif
7943
7944void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7945  if (CMSTraceSweeper) {
7946    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7947                           p2i(fc), fc->size());
7948  }
7949}
7950
7951// CMSIsAliveClosure
7952bool CMSIsAliveClosure::do_object_b(oop obj) {
7953  HeapWord* addr = (HeapWord*)obj;
7954  return addr != NULL &&
7955         (!_span.contains(addr) || _bit_map->isMarked(addr));
7956}
7957
7958
7959CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7960                      MemRegion span,
7961                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7962                      bool cpc):
7963  _collector(collector),
7964  _span(span),
7965  _bit_map(bit_map),
7966  _mark_stack(mark_stack),
7967  _concurrent_precleaning(cpc) {
7968  assert(!_span.is_empty(), "Empty span could spell trouble");
7969}
7970
7971
7972// CMSKeepAliveClosure: the serial version
7973void CMSKeepAliveClosure::do_oop(oop obj) {
7974  HeapWord* addr = (HeapWord*)obj;
7975  if (_span.contains(addr) &&
7976      !_bit_map->isMarked(addr)) {
7977    _bit_map->mark(addr);
7978    bool simulate_overflow = false;
7979    NOT_PRODUCT(
7980      if (CMSMarkStackOverflowALot &&
7981          _collector->simulate_overflow()) {
7982        // simulate a stack overflow
7983        simulate_overflow = true;
7984      }
7985    )
7986    if (simulate_overflow || !_mark_stack->push(obj)) {
7987      if (_concurrent_precleaning) {
7988        // We dirty the overflown object and let the remark
7989        // phase deal with it.
7990        assert(_collector->overflow_list_is_empty(), "Error");
7991        // In the case of object arrays, we need to dirty all of
7992        // the cards that the object spans. No locking or atomics
7993        // are needed since no one else can be mutating the mod union
7994        // table.
7995        if (obj->is_objArray()) {
7996          size_t sz = obj->size();
7997          HeapWord* end_card_addr =
7998            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7999          MemRegion redirty_range = MemRegion(addr, end_card_addr);
8000          assert(!redirty_range.is_empty(), "Arithmetical tautology");
8001          _collector->_modUnionTable.mark_range(redirty_range);
8002        } else {
8003          _collector->_modUnionTable.mark(addr);
8004        }
8005        _collector->_ser_kac_preclean_ovflw++;
8006      } else {
8007        _collector->push_on_overflow_list(obj);
8008        _collector->_ser_kac_ovflw++;
8009      }
8010    }
8011  }
8012}
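
// For the object-array case above, the end address is rounded up to
// a card boundary so that every card the array spans gets dirtied.
// A worked example (assuming 512-byte cards and a 64-bit VM): an
// array at addr = 0x1000 with sz = 70 words occupies 560 bytes and
// ends at 0x1230, so
//   end_card_addr = round_to(0x1230, 512) = 0x1400
//   redirty_range = [0x1000, 0x1400)  // covers cards 0x1000, 0x1200
// The remark phase will then rescan these dirtied cards.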
8013
8014void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8015void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8016
8017// CMSParKeepAliveClosure: a parallel version of the above.
8018// The work queues are private to each closure (thread),
8019// but may be available for stealing by other threads.
8020void CMSParKeepAliveClosure::do_oop(oop obj) {
8021  HeapWord* addr = (HeapWord*)obj;
8022  if (_span.contains(addr) &&
8023      !_bit_map->isMarked(addr)) {
8024    // In general, during recursive tracing, several threads
8025    // may be concurrently getting here; the first one to
8026    // "tag" it, claims it.
8027    if (_bit_map->par_mark(addr)) {
8028      bool res = _work_queue->push(obj);
8029      assert(res, "Low water mark should be much less than capacity");
8030      // Do a recursive trim in the hope that this will keep
8031      // stack usage lower, but leave some oops for potential stealers
8032      trim_queue(_low_water_mark);
8033    } // Else, another thread got there first
8034  }
8035}
8036
8037void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8038void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8039
8040void CMSParKeepAliveClosure::trim_queue(uint max) {
8041  while (_work_queue->size() > max) {
8042    oop new_oop;
8043    if (_work_queue->pop_local(new_oop)) {
8044      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8045      assert(_bit_map->isMarked((HeapWord*)new_oop),
8046             "no white objects on this stack!");
8047      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8048      // iterate over the oops in this oop, marking and pushing
8049      // the ones in CMS heap (i.e. in _span).
8050      new_oop->oop_iterate(&_mark_and_push);
8051    }
8052  }
8053}
8054
8055CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8056                                CMSCollector* collector,
8057                                MemRegion span, CMSBitMap* bit_map,
8058                                OopTaskQueue* work_queue):
8059  _collector(collector),
8060  _span(span),
8061  _bit_map(bit_map),
8062  _work_queue(work_queue) { }
8063
8064void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8065  HeapWord* addr = (HeapWord*)obj;
8066  if (_span.contains(addr) &&
8067      !_bit_map->isMarked(addr)) {
8068    if (_bit_map->par_mark(addr)) {
8069      bool simulate_overflow = false;
8070      NOT_PRODUCT(
8071        if (CMSMarkStackOverflowALot &&
8072            _collector->par_simulate_overflow()) {
8073          // simulate a stack overflow
8074          simulate_overflow = true;
8075        }
8076      )
8077      if (simulate_overflow || !_work_queue->push(obj)) {
8078        _collector->par_push_on_overflow_list(obj);
8079        _collector->_par_kac_ovflw++;
8080      }
8081    } // Else another thread got there already
8082  }
8083}
8084
8085void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8086void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8087
8088//////////////////////////////////////////////////////////////////
8089//  CMSExpansionCause                /////////////////////////////
8090//////////////////////////////////////////////////////////////////
8091const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8092  switch (cause) {
8093    case _no_expansion:
8094      return "No expansion";
8095    case _satisfy_free_ratio:
8096      return "Free ratio";
8097    case _satisfy_promotion:
8098      return "Satisfy promotion";
8099    case _satisfy_allocation:
8100      return "Allocation";
8101    case _allocate_par_lab:
8102      return "Par LAB";
8103    case _allocate_par_spooling_space:
8104      return "Par Spooling Space";
8105    case _adaptive_size_policy:
8106      return "Ergonomics";
8107    default:
8108      return "unknown";
8109  }
8110}
8111
8112void CMSDrainMarkingStackClosure::do_void() {
8113  // the max number to take from overflow list at a time
8114  const size_t num = _mark_stack->capacity()/4;
8115  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8116         "Overflow list should be NULL during concurrent phases");
8117  while (!_mark_stack->isEmpty() ||
8118         // if stack is empty, check the overflow list
8119         _collector->take_from_overflow_list(num, _mark_stack)) {
8120    oop obj = _mark_stack->pop();
8121    HeapWord* addr = (HeapWord*)obj;
8122    assert(_span.contains(addr), "Should be within span");
8123    assert(_bit_map->isMarked(addr), "Should be marked");
8124    assert(obj->is_oop(), "Should be an oop");
8125    obj->oop_iterate(_keep_alive);
8126  }
8127}
8128
8129void CMSParDrainMarkingStackClosure::do_void() {
8130  // drain queue
8131  trim_queue(0);
8132}
8133
8134// Trim our work_queue so its length is below max at return
8135void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8136  while (_work_queue->size() > max) {
8137    oop new_oop;
8138    if (_work_queue->pop_local(new_oop)) {
8139      assert(new_oop->is_oop(), "Expected an oop");
8140      assert(_bit_map->isMarked((HeapWord*)new_oop),
8141             "no white objects on this stack!");
8142      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8143      // iterate over the oops in this oop, marking and pushing
8144      // the ones in CMS heap (i.e. in _span).
8145      new_oop->oop_iterate(&_mark_and_push);
8146    }
8147  }
8148}
8149
8150////////////////////////////////////////////////////////////////////
8151// Support for Marking Stack Overflow list handling and related code
8152////////////////////////////////////////////////////////////////////
8153// Much of the following code is similar in shape and spirit to the
8154// code used in ParNewGC. We should try to share that code
8155// as much as possible in the future.
8156
8157#ifndef PRODUCT
8158// Debugging support for CMSStackOverflowALot
8159
8160// It's OK to call this multi-threaded; the worst thing
8161// that can happen is that we'll get a bunch of closely
8162// spaced simulated overflows. That is in fact probably
8163// good, as it would exercise the overflow code
8164// under contention.
8165bool CMSCollector::simulate_overflow() {
8166  if (_overflow_counter-- <= 0) { // just being defensive
8167    _overflow_counter = CMSMarkStackOverflowInterval;
8168    return true;
8169  } else {
8170    return false;
8171  }
8172}
8173
8174bool CMSCollector::par_simulate_overflow() {
8175  return simulate_overflow();
8176}
8177#endif
8178
8179// Single-threaded
8180bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8181  assert(stack->isEmpty(), "Expected precondition");
8182  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8183  size_t i = num;
8184  oop  cur = _overflow_list;
8185  const markOop proto = markOopDesc::prototype();
8186  NOT_PRODUCT(ssize_t n = 0;)
8187  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8188    next = oop(cur->mark());
8189    cur->set_mark(proto);   // until proven otherwise
8190    assert(cur->is_oop(), "Should be an oop");
8191    bool res = stack->push(cur);
8192    assert(res, "Bit off more than can chew?");
8193    NOT_PRODUCT(n++;)
8194  }
8195  _overflow_list = cur;
8196#ifndef PRODUCT
8197  assert(_num_par_pushes >= n, "Too many pops?");
8198  _num_par_pushes -= n;
8199#endif
8200  return !stack->isEmpty();
8201}
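
// The overflow list is threaded through the mark words of the objects
// on it: each mark holds (as an oop) the next object, with the real
// mark saved beforehand via preserve_mark_if_necessary() when it
// contains information that cannot be recreated. Pictorially
// (illustrative):
//
//   _overflow_list --> objA          objB          objC
//                      mark: objB    mark: objC    mark: NULL
//
// Popping restores each mark to markOopDesc::prototype() ("until
// proven otherwise"); saved marks are reinstated later by
// restore_preserved_marks_if_any().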
8202
8203#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8204// (MT-safe) Get a prefix of at most "num" from the list.
8205// The overflow list is chained through the mark word of
8206// each object in the list. We fetch the entire list,
8207// break off a prefix of the right size and return the
8208// remainder. If other threads try to take objects from
8209// the overflow list at that time, they will wait for
8210// some time to see if data becomes available. If (and
8211// only if) another thread places one or more object(s)
8212// on the global list before we have returned the suffix
8213// to the global list, we will walk down our local list
8214// to find its end and append the global list to
8215// our suffix before returning it. This suffix walk can
8216// prove to be expensive (quadratic in the amount of traffic)
8217// when there are many objects in the overflow list and
8218// there is much producer-consumer contention on the list.
8219// *NOTE*: The overflow list manipulation code here and
8220// in ParNewGeneration:: are very similar in shape,
8221// except that in the ParNew case we use the old (from/eden)
8222// copy of the object to thread the list via its klass word.
8223// Because of the common code, if you make any changes in
8224// the code below, please check the ParNew version to see if
8225// similar changes might be needed.
8226// CR 6797058 has been filed to consolidate the common code.
8227bool CMSCollector::par_take_from_overflow_list(size_t num,
8228                                               OopTaskQueue* work_q,
8229                                               int no_of_gc_threads) {
8230  assert(work_q->size() == 0, "First empty local work queue");
8231  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8232  if (_overflow_list == NULL) {
8233    return false;
8234  }
8235  // Grab the entire list; we'll put back a suffix
8236  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8237  Thread* tid = Thread::current();
8238  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8239  // set to ParallelGCThreads.
8240  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8241  size_t sleep_time_millis = MAX2((size_t)1, num/100);
8242  // If the list is busy, we spin for a short while,
8243  // sleeping between attempts to get the list.
8244  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8245    os::sleep(tid, sleep_time_millis, false);
8246    if (_overflow_list == NULL) {
8247      // Nothing left to take
8248      return false;
8249    } else if (_overflow_list != BUSY) {
8250      // Try and grab the prefix
8251      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8252    }
8253  }
8254  // If the list was found to be empty, or we spun long
8255  // enough, we give up and return empty-handed. If we leave
8256  // the list in the BUSY state below, it must be the case that
8257  // some other thread holds the overflow list and will set it
8258  // to a non-BUSY state in the future.
8259  if (prefix == NULL || prefix == BUSY) {
8260     // Nothing to take or waited long enough
8261     if (prefix == NULL) {
8262       // Write back the NULL in case we overwrote it with BUSY above
8263       // and it is still the same value.
8264       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8265     }
8266     return false;
8267  }
8268  assert(prefix != NULL && prefix != BUSY, "Error");
8269  size_t i = num;
8270  oop cur = prefix;
8271  // Walk down the first "num" objects, unless we reach the end.
8272  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8273  if (cur->mark() == NULL) {
8274    // We have "num" or fewer elements in the list, so there
8275    // is nothing to return to the global list.
8276    // Write back the NULL in lieu of the BUSY we wrote
8277    // above, if it is still the same value.
8278    if (_overflow_list == BUSY) {
8279      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8280    }
8281  } else {
8282    // Chop off the suffix and return it to the global list.
8283    assert(cur->mark() != BUSY, "Error");
8284    oop suffix_head = cur->mark(); // suffix will be put back on global list
8285    cur->set_mark(NULL);           // break off suffix
8286    // It's possible that the list is still in the empty(busy) state
8287    // we left it in a short while ago; in that case we may be
8288    // able to place back the suffix without incurring the cost
8289    // of a walk down the list.
8290    oop observed_overflow_list = _overflow_list;
8291    oop cur_overflow_list = observed_overflow_list;
8292    bool attached = false;
8293    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8294      observed_overflow_list =
8295        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8296      if (cur_overflow_list == observed_overflow_list) {
8297        attached = true;
8298        break;
8299      } else cur_overflow_list = observed_overflow_list;
8300    }
8301    if (!attached) {
8302      // Too bad, someone else sneaked in (at least) an element; we'll need
8303      // to do a splice. Find tail of suffix so we can prepend suffix to global
8304      // list.
8305      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8306      oop suffix_tail = cur;
8307      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8308             "Tautology");
8309      observed_overflow_list = _overflow_list;
8310      do {
8311        cur_overflow_list = observed_overflow_list;
8312        if (cur_overflow_list != BUSY) {
8313          // Do the splice ...
8314          suffix_tail->set_mark(markOop(cur_overflow_list));
8315        } else { // cur_overflow_list == BUSY
8316          suffix_tail->set_mark(NULL);
8317        }
8318        // ... and try to place spliced list back on overflow_list ...
8319        observed_overflow_list =
8320          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8321      } while (cur_overflow_list != observed_overflow_list);
8322      // ... until we have succeeded in doing so.
8323    }
8324  }
8325
8326  // Push the prefix elements on work_q
8327  assert(prefix != NULL, "control point invariant");
8328  const markOop proto = markOopDesc::prototype();
8329  oop next;
8330  NOT_PRODUCT(ssize_t n = 0;)
8331  for (cur = prefix; cur != NULL; cur = next) {
8332    next = oop(cur->mark());
8333    cur->set_mark(proto);   // until proven otherwise
8334    assert(cur->is_oop(), "Should be an oop");
8335    bool res = work_q->push(cur);
8336    assert(res, "Bit off more than we can chew?");
8337    NOT_PRODUCT(n++;)
8338  }
8339#ifndef PRODUCT
8340  assert(_num_par_pushes >= n, "Too many pops?");
8341  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8342#endif
8343  return true;
8344}
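
// The BUSY protocol above, in schematic form (a summary of the code,
// not additional logic):
//
//   prefix = XCHG(&_overflow_list, BUSY);      // claim the whole list
//   if (prefix == NULL)                        // nothing to take:
//     CAS(&_overflow_list, BUSY, NULL); fail;  //   undo our BUSY write
//   if (prefix == BUSY) fail;                  // another thread has it
//   walk prefix for num objects; suffix = the rest;
//   CAS(&_overflow_list, BUSY or NULL, suffix_head);
//   if that fails (another thread pushed meanwhile):
//     walk suffix to find suffix_tail;
//     loop: suffix_tail->mark = observed head;
//           CAS(&_overflow_list, observed, suffix_head);
//
// The potentially expensive step is the walk to find suffix_tail,
// which is what makes heavy producer-consumer traffic on a long list
// quadratic, as noted in the comment preceding this function.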
8345
8346// Single-threaded
8347void CMSCollector::push_on_overflow_list(oop p) {
8348  NOT_PRODUCT(_num_par_pushes++;)
8349  assert(p->is_oop(), "Not an oop");
8350  preserve_mark_if_necessary(p);
8351  p->set_mark((markOop)_overflow_list);
8352  _overflow_list = p;
8353}
8354
8355// Multi-threaded; use CAS to prepend to overflow list
8356void CMSCollector::par_push_on_overflow_list(oop p) {
8357  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8358  assert(p->is_oop(), "Not an oop");
8359  par_preserve_mark_if_necessary(p);
8360  oop observed_overflow_list = _overflow_list;
8361  oop cur_overflow_list;
8362  do {
8363    cur_overflow_list = observed_overflow_list;
8364    if (cur_overflow_list != BUSY) {
8365      p->set_mark(markOop(cur_overflow_list));
8366    } else {
8367      p->set_mark(NULL);
8368    }
8369    observed_overflow_list =
8370      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8371  } while (cur_overflow_list != observed_overflow_list);
8372}
8373#undef BUSY
8374
8375// Single threaded
8376// General Note on GrowableArray: pushes may silently fail
8377// because we are (temporarily) out of C-heap for expanding
8378// the stack. The problem is quite ubiquitous and affects
8379// a lot of code in the JVM. The prudent thing for GrowableArray
8380// to do (for now) is to exit with an error. However, that may
8381// be too draconian in some cases because the caller may be
8382// able to recover without much harm. For such cases, we
8383// should probably introduce a "soft_push" method which returns
8384// an indication of success or failure with the assumption that
8385// the caller may be able to recover from a failure; code in
8386// the VM can then be changed, incrementally, to deal with such
8387// failures where possible, thus, incrementally hardening the VM
8388// in such low-resource situations.
8389void CMSCollector::preserve_mark_work(oop p, markOop m) {
8390  _preserved_oop_stack.push(p);
8391  _preserved_mark_stack.push(m);
8392  assert(m == p->mark(), "Mark word changed");
8393  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8394         "bijection");
8395}
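
// The "soft_push" mentioned in the note above might look roughly
// like this (a hypothetical sketch; GrowableArray has no such
// method, and try_grow() is invented for the example):
//
//   template <class E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {  // non-fatal expansion
//       return false;                     // let the caller recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }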
8396
8397// Single threaded
8398void CMSCollector::preserve_mark_if_necessary(oop p) {
8399  markOop m = p->mark();
8400  if (m->must_be_preserved(p)) {
8401    preserve_mark_work(p, m);
8402  }
8403}
8404
8405void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8406  markOop m = p->mark();
8407  if (m->must_be_preserved(p)) {
8408    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8409    // Even though we read the mark word without holding
8410    // the lock, we are assured that it will not change
8411    // because we "own" this oop, so no other thread can
8412    // be trying to push it on the overflow list; see
8413    // the assertion in preserve_mark_work() that checks
8414    // that m == p->mark().
8415    preserve_mark_work(p, m);
8416  }
8417}
8418
8419// We should be able to do this multi-threaded,
8420// a chunk of stack being a task (this is
8421// correct because each oop only ever appears
8422// once in the overflow list). However, it's
8423// not very easy to completely overlap this with
8424// other operations, so it will generally not be done
8425// until all work's been completed. Because we
8426// expect the preserved oop stack (set) to be small,
8427// it's probably fine to do this single-threaded.
8428// We can explore cleverer concurrent/overlapped/parallel
8429// processing of preserved marks if we feel the
8430// need for this in the future. Stack overflow should
8431// be so rare in practice and, when it happens, its
8432// effect on performance so great that this will
8433// likely just be in the noise anyway.
8434void CMSCollector::restore_preserved_marks_if_any() {
8435  assert(SafepointSynchronize::is_at_safepoint(),
8436         "world should be stopped");
8437  assert(Thread::current()->is_ConcurrentGC_thread() ||
8438         Thread::current()->is_VM_thread(),
8439         "should be single-threaded");
8440  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8441         "bijection");
8442
8443  while (!_preserved_oop_stack.is_empty()) {
8444    oop p = _preserved_oop_stack.pop();
8445    assert(p->is_oop(), "Should be an oop");
8446    assert(_span.contains(p), "oop should be in _span");
8447    assert(p->mark() == markOopDesc::prototype(),
8448           "Set when taken from overflow list");
8449    markOop m = _preserved_mark_stack.pop();
8450    p->set_mark(m);
8451  }
8452  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8453         "stacks were cleared above");
8454}
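
// The two stacks act as parallel arrays: entry i of
// _preserved_oop_stack pairs with entry i of _preserved_mark_stack
// (the "bijection" asserted above). The round trip, schematically:
//
//   preserve: _preserved_oop_stack.push(p);
//             _preserved_mark_stack.push(p->mark());
//   ... p's mark word is reused as an overflow-list link ...
//   restore:  p = _preserved_oop_stack.pop();
//             m = _preserved_mark_stack.pop();
//             p->set_mark(m);
//
// Popping both stacks in lock step is what keeps the pairing intact.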
8455
8456#ifndef PRODUCT
8457bool CMSCollector::no_preserved_marks() const {
8458  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8459}
8460#endif
8461
8462// Transfer some number of overflown objects to usual marking
8463// stack. Return true if some objects were transferred.
8464bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8465  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8466                    (size_t)ParGCDesiredObjsFromOverflowList);
8467
8468  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8469  assert(_collector->overflow_list_is_empty() || res,
8470         "If list is not empty, we should have taken something");
8471  assert(!res || !_mark_stack->isEmpty(),
8472         "If we took something, it should now be on our stack");
8473  return res;
8474}
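
// A worked example of the sizing above (illustrative numbers): with
// a mark stack of capacity 4096 currently holding 96 entries, the
// free-quarter term is (4096 - 96) / 4 = 1000; this is then capped
// by ParGCDesiredObjsFromOverflowList (20 by default), so num = 20
// objects are transferred per refill.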
8475
8476size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8477  size_t res = _sp->block_size_no_stall(addr, _collector);
8478  if (_sp->block_is_obj(addr)) {
8479    if (_live_bit_map->isMarked(addr)) {
8480      // It can't have been dead in a previous cycle
8481      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8482    } else {
8483      _dead_bit_map->mark(addr);      // mark the dead object
8484    }
8485  }
8486  // Could be 0, if the block size could not be computed without stalling.
8487  return res;
8488}
8489
8490TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8491
8492  switch (phase) {
8493    case CMSCollector::InitialMarking:
8494      initialize(true  /* fullGC */ ,
8495                 cause /* cause of the GC */,
8496                 true  /* recordGCBeginTime */,
8497                 true  /* recordPreGCUsage */,
8498                 false /* recordPeakUsage */,
8499                 false /* recordPostGCusage */,
8500                 true  /* recordAccumulatedGCTime */,
8501                 false /* recordGCEndTime */,
8502                 false /* countCollection */  );
8503      break;
8504
8505    case CMSCollector::FinalMarking:
8506      initialize(true  /* fullGC */ ,
8507                 cause /* cause of the GC */,
8508                 false /* recordGCBeginTime */,
8509                 false /* recordPreGCUsage */,
8510                 false /* recordPeakUsage */,
8511                 false /* recordPostGCusage */,
8512                 true  /* recordAccumulatedGCTime */,
8513                 false /* recordGCEndTime */,
8514                 false /* countCollection */  );
8515      break;
8516
8517    case CMSCollector::Sweeping:
8518      initialize(true  /* fullGC */ ,
8519                 cause /* cause of the GC */,
8520                 false /* recordGCBeginTime */,
8521                 false /* recordPreGCUsage */,
8522                 true  /* recordPeakUsage */,
8523                 true  /* recordPostGCusage */,
8524                 false /* recordAccumulatedGCTime */,
8525                 true  /* recordGCEndTime */,
8526                 true  /* countCollection */  );
8527      break;
8528
8529    default:
8530      ShouldNotReachHere();
8531  }
8532}
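
// Summary of the flag settings above, per collector phase:
//
//                            InitialMarking  FinalMarking  Sweeping
//   recordGCBeginTime             true           false       false
//   recordPreGCUsage              true           false       false
//   recordPeakUsage               false          false       true
//   recordPostGCusage             false          false       true
//   recordAccumulatedGCTime       true           true        false
//   recordGCEndTime               false          false       true
//   countCollection               false          false       true
//
// That is, the initial mark opens the collection record, the final
// mark only accumulates GC time, and the sweep closes out and counts
// the collection.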
8533