concurrentMarkSweepGeneration.inline.hpp revision 13126:853247754844
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/bitMap.inline.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

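// Conversions between HeapWord addresses within the covered region
// [_bmStartWord, _bmStartWord + _bmWordSize) and bit offsets in the
// underlying BitMap; each bit covers 2^_shifter heap words.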
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

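// Single-bit updates for the bit map. mark() uses a plain store;
// par_mark() and par_clear() use atomic updates so that several threads
// may operate on the map concurrently, and par_mark() reports whether
// this call was the one that actually set the bit.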
inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

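// Range updates mark or clear every bit covering [mr.start(), mr.end()).
// BitMap::small_range and BitMap::large_range are hints about the expected
// range size that let the underlying BitMap pick an appropriate strategy.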
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// contained in [start_addr, end_addr).
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit,
// starting at addr (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit,
// starting at addr (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

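// Apply "cl" to each set bit in the intersection of [left, right) with the
// region covered by this bit map.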
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

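// An object in the CMS generation is considered dead here only while class
// unloading is enabled and the collector is in the Sweeping state; anything
// the preceding marking phase left unmarked is garbage.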
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either a scavenge is
  // done or a foreground GC wants to take over the collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

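// Statistics for young ("gc0") collections. Period and duration are kept as
// exponentially weighted averages (weight _gc0_alpha); the period sample is
// the wall-clock time since the previous record_gc0_begin().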
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the young gen collection was skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}

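// _cms_timer is reused: between record_cms_end() and the following
// record_cms_begin() it measures the gap between concurrent cycles (folded
// into _cms_period); between begin and end it measures the cycle's duration.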
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

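// The rates below are in bytes per second: the averaged bytes promoted or
// directly allocated per young-collection period, divided by the averaged
// period length.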
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

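// Yield checks for the concurrent marking, precleaning and sweeping closures
// below: yield only if the concurrent thread has been asked to yield and no
// foreground (stop-the-world) collection is in progress; where present, the
// _yield flag lets the caller disable yielding altogether.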
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void ParMarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive()) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void ParPushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// A return value of "true" indicates that the ongoing preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered so that the cheap _yield test is done first;
  // during the remarking phase _yield is false, making the check as
  // inexpensive as possible.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP