/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc/cms/cmsOopClosures.hpp"
#include "gc/cms/gSpaceCounters.hpp"
#include "gc/cms/yieldingWorkgroup.hpp"
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcStats.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/iterator.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/stack.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that this generation is a single compactible space. Neither of these
// restrictions appears essential; they will be relaxed in the future,
// when more time is available to implement the greater generality
// (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class AdaptiveSizePolicy;
class CMSCollector;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class ParNewGeneration;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (I.e. for the marking bit map
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
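// Illustrative sketch (not part of the implementation): given the mapping
// described above, an address corresponds to a bit roughly as
//   bit_offset  = (addr - _bmStartWord) >> _shifter;    // cf. heapWordToOffset()
//   addr_of_bit = _bmStartWord + (offset << _shifter);  // cf. offsetToHeapWord()
// so with _shifter == 0 each HeapWord has its own mark bit, while the mod
// union table uses one bit per card-worth of HeapWords. The expressions above
// merely paraphrase the conversion utilities declared in CMSBitMap below.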
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMapView   _bm;            // the bit map itself
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size();   }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not perform locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                            HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                              HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                           HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC>  {
  friend class CMSCollector;   // To get at expansion stats further below.

  VirtualSpace _virtual_space;  // Space for the stack
  oop*   _base;      // Bottom of stack
  size_t _index;     // One more than last occupied index
  size_t _capacity;  // Max #elements
  Mutex  _par_lock;  // An advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run

 protected:
  size_t _hit_limit;      // We hit max stack size limit
  size_t _failed_double;  // We failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
              Monitor::_safepoint_check_never),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
     oop least = (oop)low;
     for (size_t i = 0; i < _index; i++) {
       least = MIN2(least, _base[i]);
     }
     return least;
  }

  // Exposed here to allow stack expansion in the parallel case.
  Mutex* par_lock() { return &_par_lock; }
};
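
// Illustrative (non-normative) usage sketch: a marking routine typically tries
// a plain push() and treats a false return as overflow, e.g.
//   if (!mark_stack->push(obj)) {
//     ... // overflow: record a restart address and/or expand the stack
//   }
// The identifiers above are only for illustration; the actual overflow policy
// (restart addresses, the shared overflow list, stack expansion) lives in
// CMSCollector, not in this class.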

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
           _index, _capacity);
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0) {
      log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
             "): out of bounds at overflow#" SIZE_FORMAT,
             _index, _capacity, _overflows);
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha (a percentage):
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
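  // Worked example (numbers illustrative only): with alpha == 25, a running
  // average of 4.0s and a new sample of 2.0s, the updated average is
  //   (75 * 4.0 + 25 * 2.0) / 100 = 3.5s,
  // and with alpha == 100 (the bootstrap value) the new sample replaces the
  // average entirely, as noted above.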
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;         // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_period;
  size_t _cms_allocated;        // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

 protected:
  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};

class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class ParPushOrMarkClosure;          // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class ParMarkFromRootsClosure;       //  to access _restart_addr
                                              // ... and for clearing cards
  friend class ParConcMarkingClosure;         //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class ParPushAndMarkClosure;         //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oopDesc* volatile _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // True if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // Time between sweeps
  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
  // Padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
  MemRegion                      _span;    // Span covering the CMS generation
  CardTableRS*                   _ct;      // Card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // In support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support.
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
  // Keep this textually after _markBitMap and _span; c'tor dependency.

  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
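  // Thus a typical (unaborted) concurrent cycle, summarized from the
  // transition table above, proceeds
  //   Idling -> Marking -> Precleaning -> [AbortablePreclean ->]
  //   FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling.
  // (The InitialMarking and FinalMarking states correspond to the two
  // stop-the-world checkpoints; see VM_CMS_Initial_Mark and
  // VM_CMS_Final_Remark above and CMS_op_type below. This note only
  // paraphrases the table above; it adds no additional constraint.)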
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signaling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                 // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                 // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;

  // Occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // Timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  ParNewGeneration* _young_gen;

  HeapWord* volatile* _top_addr;    // ... Top of Eden
  HeapWord**          _end_addr;    // ... End of Eden
  Mutex*              _eden_chunk_lock;
  HeapWord**          _eden_chunk_array; // ... Eden partitioning array
  size_t              _eden_chunk_index; // ... top (exclusive) of array
  size_t              _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // The following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(); // Initial checkpoint work

  // A return value of false indicates failure due to stack overflow
  bool markFromRootsWork();  // Concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st();      // Single-threaded marking
  bool do_marking_mt();      // Multi-threaded  marking

 private:

  // Concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // Final (second) checkpoint work
  void checkpointRootsFinalWork();
  // Work routine for parallel version of remark
  void do_remark_parallel();
  // Work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // Reference processing work routine (during second checkpoint)
  void refProcessingWork();

  // Concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);

  // Concurrent resetting of support data structures
  void reset_concurrent();
  // Resetting of support data structures from a STW full GC
  void reset_stw();

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

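  // (The two "Printezis bits" queries below rely on CMS's convention of
  // setting an extra pair of bits in the marking bit map -- roughly, at the
  // second and at the last word of an otherwise unparsable marked object --
  // so that its size can be recovered without examining its header. This
  // note is a paraphrase of that scheme, not a specification of it.)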
  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // Locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // Main CMS steps and related support
  void checkpointRootsInitial();
  bool markFromRoots();  // a return value of false indicates failure
                         // due to stack overflow
  void preclean();
  void checkpointRootsFinal();
  void sweep();

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;

  NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);)

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // Timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  jlong   timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();

  static void print_on_error(outputStream* st);

  // Debugging
  void verify();
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // Convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // Accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

  // Accessing spaces
  CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink the free list by the specified number of bytes
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(Generation* current_generation, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

  void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);

  void assert_correct_size_change_locking();

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // Override
  MemRegion used_region_at_save_marks() const;

  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }


  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (old) generation that a promotion failure was
  // encountered during a collection of the young generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
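  // For illustration: each (ClosureType, suffix) pair supplied by
  // ALL_SINCE_SAVE_MARKS_CLOSURES expands the macro above into a declaration
  // of the form
  //   void oop_since_save_marks_iterate<suffix>(ClosureType* cl);
  // The concrete closure types and suffixes are defined by that macro
  // elsewhere, not here.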

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
class ParMarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                          MemRegion span,
                          CMSBitMap* bit_map,
                          OopTaskQueue* work_queue,
                          CMSMarkStack*  overflow_stack);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack*  mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack*  mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};
1365
1366// A version of ObjectClosure with "memory" (see _previous_address below)
1367class UpwardsObjectClosure: public BoolObjectClosure {
1368  HeapWord* _previous_address;
1369 public:
1370  UpwardsObjectClosure() : _previous_address(NULL) { }
1371  void set_previous(HeapWord* addr) { _previous_address = addr; }
1372  HeapWord* previous()              { return _previous_address; }
1373  // A return value of "true" can be used by the caller to decide
1374  // if this object's end should *NOT* be recorded in
1375  // _previous_address above.
1376  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
1377};
1378
1379// This closure is used during the second checkpointing phase
1380// to rescan the marked objects on the dirty cards in the mod
1381// union table and the card table proper. It's invoked via
1382// MarkFromDirtyCardsClosure below. It uses either
1383// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1384// declared in genOopClosures.hpp to accomplish some of its work.
1385// In the parallel case the bitMap is shared, so access to
1386// it needs to be suitably synchronized for updates by embedded
1387// closures that update it; however, this closure itself only
1388// reads the bit_map and because it is idempotent, is immune to
1389// reading stale values.
1390class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1391  #ifdef ASSERT
1392    CMSCollector*          _collector;
1393    MemRegion              _span;
1394    union {
1395      CMSMarkStack*        _mark_stack;
1396      OopTaskQueue*        _work_queue;
1397    };
1398  #endif // ASSERT
1399  bool                       _parallel;
1400  CMSBitMap*                 _bit_map;
1401  union {
1402    MarkRefsIntoAndScanClosure*    _scan_closure;
1403    ParMarkRefsIntoAndScanClosure* _par_scan_closure;
1404  };
1405
1406 public:
1407  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1408                                MemRegion span,
1409                                ReferenceProcessor* rp,
1410                                CMSBitMap* bit_map,
1411                                CMSMarkStack*  mark_stack,
1412                                MarkRefsIntoAndScanClosure* cl):
1413    #ifdef ASSERT
1414      _collector(collector),
1415      _span(span),
1416      _mark_stack(mark_stack),
1417    #endif // ASSERT
1418    _parallel(false),
1419    _bit_map(bit_map),
1420    _scan_closure(cl) { }
1421
1422  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1423                                MemRegion span,
1424                                ReferenceProcessor* rp,
1425                                CMSBitMap* bit_map,
1426                                OopTaskQueue* work_queue,
1427                                ParMarkRefsIntoAndScanClosure* cl):
1428    #ifdef ASSERT
1429      _collector(collector),
1430      _span(span),
1431      _work_queue(work_queue),
1432    #endif // ASSERT
1433    _parallel(true),
1434    _bit_map(bit_map),
1435    _par_scan_closure(cl) { }
1436
1437  bool do_object_b(oop obj) {
1438    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
1439    return false;
1440  }
1441  bool do_object_bm(oop p, MemRegion mr);
1442};
1443
1444// This closure is used during the second checkpointing phase
1445// to rescan the marked objects on the dirty cards in the mod
1446// union table and the card table proper. It invokes
1447// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1448// In the parallel case, the bit map is shared and requires
1449// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            ParMarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

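// Illustrative note (not part of the HotSpot sources): conceptually, the
// closure above is handed one dirty MemRegion at a time, bumps its dirty-card
// count, and lets the embedded ScanMarkedObjectsAgainClosure rescan the
// marked objects on that region. A much-simplified, self-contained sketch of
// turning a byte-per-card table into maximal dirty runs (the card geometry,
// the "clean" value and all names here are hypothetical):
//
//   #include <cstddef>
//
//   const size_t card_size_in_words = 64;            // assumed card geometry
//
//   template <typename RegionFn>
//   size_t for_each_dirty_run(const unsigned char* cards, size_t n_cards,
//                             RegionFn f) {
//     size_t dirty = 0;
//     for (size_t i = 0; i < n_cards; ) {
//       if (cards[i] == 0) { ++i; continue; }         // 0 == clean (assumed)
//       size_t start = i;
//       while (i < n_cards && cards[i] != 0) ++i;     // extend the dirty run
//       dirty += i - start;                           // cf. _num_dirty_cards
//       f(start * card_size_in_words, i * card_size_in_words); // [begin, end)
//     }
//     return dirty;
//   }
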
// This closure is used in the non-product build to verify that no
// MemRegions are ever presented to it: any invocation of do_MemRegion
// trips a guarantee.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in cmsOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  Mutex*                         _freelistLock;
  CMSBitMap*                     _bitMap;
  CMSMarkStack*                  _markStack;
  MarkRefsIntoAndScanClosure*    _scanningClosure;
  DEBUG_ONLY(HeapWord*           _last_scanned_object;)

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack*  markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl)
    DEBUG_ONLY(COMMA _last_scanned_object(NULL))
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t      do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

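// Illustrative note (not part of the HotSpot sources): concurrent precleaning
// must periodically offer to yield so that mutators and a potential foreground
// collection are not held up. The cheap, inlined do_yield_check() only tests
// whether a yield is wanted; the out-of-line do_yield_work() does the
// expensive part (e.g. dropping and reacquiring locks). A minimal,
// self-contained sketch of that split, with hypothetical stand-in names:
//
//   class PrecleanLoop {
//     bool _yield_enabled;           // analogous to the _yield flag above
//     bool (*_yield_requested)();    // e.g. "is a foreground GC waiting?"
//
//     void do_yield_work() {
//       // drop locks, pause briefly, reacquire locks -- elided
//     }
//
//    public:
//     PrecleanLoop(bool yield_enabled, bool (*requested)())
//       : _yield_enabled(yield_enabled), _yield_requested(requested) { }
//
//     inline void do_yield_check() {
//       if (_yield_enabled && _yield_requested()) {
//         do_yield_work();
//       }
//     }
//   };
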
// This closure is used during the precleaning phase to scan objects in
// the survivor spaces "carefully", yielding (when so configured) as it goes.
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  CMSBitMap*                     _bit_map;
  CMSMarkStack*                  _mark_stack;
  PushAndMarkClosure*            _scanning_closure;
  unsigned int                   _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool          should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p);

  size_t      do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced.  The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block currently being swept that is free or
//     garbage and can therefore be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;    // Generation being swept
  CompactibleFreeListSpace*      _sp;   // Space being swept
  HeapWord*                      _limit;// the address at or above which the sweep should stop
                                        // because we do not expect blocks that are newly
                                        // garbage (and hence eligible for sweeping) at or
                                        // beyond that address.
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                        // Often, we have just found
                                        // a free chunk and started
                                        // a new free range; we do not
                                        // eagerly remove this chunk from
                                        // the free lists unless there is
                                        // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                        // free range contains chunks
                                        // coalesced
  bool                           _yield;
                                        // Whether sweeping should be
                                        // done with yields. For instance
                                        // when done by the foreground
                                        // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
                                                // pointer to the "left hand
                                                // chunk"
  size_t                         _freeRangeSize;
                                        // When _inFreeRange is set, this
                                        // indicates the accumulated size
                                        // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk *fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk *fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const          { return _freeFinger; }
  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
  bool inFreeRange()    const           { return _inFreeRange; }
  void set_inFreeRange(bool v)          { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const   { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t       do_blk_careful(HeapWord* addr);
  void         print() const { print_on(tty); }
  void         print_on(outputStream *st) const;
};

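// Illustrative note (not part of the HotSpot sources): the sweep above
// coalesces runs of free and garbage blocks into a single "left hand chunk"
// and returns the accumulated run to the free lists when a live block ends
// the run. A much-simplified, self-contained sketch of that state machine
// (the block layout, kinds and flush routine are hypothetical stand-ins):
//
//   #include <cstddef>
//
//   enum BlockKind { LIVE, GARBAGE, ALREADY_FREE };
//   struct Block { size_t size_in_words; BlockKind kind; };
//
//   struct Sweep {
//     bool   _inFreeRange;     // is there a current LHC?
//     size_t _freeFinger;      // start of the LHC (word index)
//     size_t _freeRangeSize;   // accumulated size of the LHC
//
//     Sweep() : _inFreeRange(false), _freeFinger(0), _freeRangeSize(0) { }
//
//     void flush_cur_free_chunk(size_t start, size_t size) {
//       // hand [start, start + size) back to the free lists -- elided
//     }
//
//     // Returns the number of words consumed at 'addr'.
//     size_t do_blk(size_t addr, const Block& b) {
//       if (b.kind == LIVE) {
//         if (_inFreeRange) {                 // a live block ends the free run
//           flush_cur_free_chunk(_freeFinger, _freeRangeSize);
//           _inFreeRange = false;
//         }
//       } else {                              // free or garbage: coalesce
//         if (!_inFreeRange) {
//           _inFreeRange   = true;            // start a new LHC here
//           _freeFinger    = addr;
//           _freeRangeSize = 0;
//         }
//         _freeRangeSize += b.size_in_words;
//       }
//       return b.size_in_words;
//     }
//   };
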
// Closures related to weak reference processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a point at which
// an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase and during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSKeepAliveClosure* keep_alive,
                      bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

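// Illustrative note (not part of the HotSpot sources): "draining" the marking
// stack computes the transitive closure of liveness from an initial set --
// pop an object, trace its references, and push any reference not yet marked,
// until the stack is empty. A minimal, self-contained sketch on a toy object
// graph (the types and the marking set are hypothetical stand-ins):
//
//   #include <vector>
//   #include <unordered_set>
//
//   struct Obj { std::vector<Obj*> refs; };
//
//   void drain_marking_stack(std::vector<Obj*>& stack,
//                            std::unordered_set<Obj*>& marked) {
//     while (!stack.empty()) {
//       Obj* o = stack.back();
//       stack.pop_back();
//       for (Obj* ref : o->refs) {
//         if (ref != nullptr && marked.insert(ref).second) {  // newly marked
//           stack.push_back(ref);                             // trace it too
//         }
//       }
//     }
//   }
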
// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*           _collector;
  MemRegion               _span;
  OopTaskQueue*           _work_queue;
  CMSBitMap*              _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};

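// Illustrative note (not part of the HotSpot sources): in the parallel
// variant each worker drains its own OopTaskQueue, and trim_queue(max)
// processes local entries until at most 'max' remain, so that surplus work
// stays available for other threads to steal. A minimal, self-contained
// sketch with a plain deque standing in for the task queue (all names are
// hypothetical):
//
//   #include <cstddef>
//   #include <deque>
//
//   struct Task { /* e.g. an object reference to trace */ };
//
//   struct Worker {
//     std::deque<Task> _work_queue;
//
//     void process(const Task& t) { /* trace the object -- elided */ }
//
//     void trim_queue(size_t max) {
//       while (_work_queue.size() > max) {
//         Task t = _work_queue.back();   // take from our own end of the queue
//         _work_queue.pop_back();
//         process(t);                    // processing may enqueue more tasks
//       }
//     }
//   };
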
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks the free list locks for a given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

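// Illustrative note (not part of the HotSpot sources): FreelistLocker is a
// scoped (RAII) guard -- the constructor takes the free list locks and the
// destructor releases them on every exit path from the scope. A hypothetical
// usage sketch:
//
//   {
//     FreelistLocker fll(collector);   // getFreelistLocks()
//     // ... operate on the free lists while they are held ...
//   }                                  // releaseFreelistLocks() runs here
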
// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap *live_bit_map,
                         CMSBitMap *dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

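// Illustrative note (not part of the HotSpot sources): conceptually, a
// closure like the one above walks the space block by block and records in
// the "dead" bit map every block start that is not marked in the "live" bit
// map. A much-simplified, self-contained sketch over plain bit vectors (the
// block layout and names are hypothetical stand-ins):
//
//   #include <cstddef>
//   #include <vector>
//
//   // block_start[i] is true if a block starts at word i; live[i] is true
//   // if that block holds a live (marked) object. Returns the dead map.
//   std::vector<bool> mark_dead_blocks(const std::vector<bool>& block_start,
//                                      const std::vector<bool>& live) {
//     std::vector<bool> dead(block_start.size(), false);
//     for (size_t i = 0; i < block_start.size(); ++i) {
//       if (block_start[i] && !live[i]) {
//         dead[i] = true;                 // cf. marking in _dead_bit_map
//       }
//     }
//     return dead;
//   }
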
class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP