// tenuredGeneration.cpp -- HotSpot revision 8638:767f36deb0dc
1/*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/serial/genMarkSweep.hpp"
27#include "gc/serial/tenuredGeneration.inline.hpp"
28#include "gc/shared/blockOffsetTable.inline.hpp"
29#include "gc/shared/cardGeneration.inline.hpp"
30#include "gc/shared/collectorCounters.hpp"
31#include "gc/shared/gcTimer.hpp"
32#include "gc/shared/genOopClosures.inline.hpp"
33#include "gc/shared/generationSpec.hpp"
34#include "gc/shared/space.hpp"
35#include "memory/allocation.inline.hpp"
36#include "oops/oop.inline.hpp"
37#include "runtime/java.hpp"
38#include "utilities/macros.hpp"
39#if INCLUDE_ALL_GCS
40#include "gc/cms/parOopClosures.hpp"
41#endif
42
// Construct the tenured (old) generation on top of the reserved space and
// remembered set handed down by the heap. CardGeneration's constructor has
// already committed the initial virtual space and created the block-offset
// table (_bts); here we create the single TenuredSpace covering the
// committed region and wire up the jstat performance counters.
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     GenRemSet* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  // The space spans exactly the committed part of the virtual space.
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;          // no pending shrink yet
  _capacity_at_prologue = 0;   // set for real in gc_prologue()

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  // "MSC" = mark-sweep-compact, the collector used for this generation.
  _gc_counters = new CollectorCounters("MSC", 1);

  // Space counter 0 of the old generation, sized by the full reservation.
  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}
72
73void TenuredGeneration::gc_prologue(bool full) {
74  _capacity_at_prologue = capacity();
75  _used_at_prologue = used();
76}
77
78bool TenuredGeneration::should_collect(bool  full,
79                                       size_t size,
80                                       bool   is_tlab) {
81  // This should be one big conditional or (||), but I want to be able to tell
82  // why it returns what it returns (without re-evaluating the conditionals
83  // in case they aren't idempotent), so I'm doing it this way.
84  // DeMorgan says it's okay.
85  bool result = false;
86  if (!result && full) {
87    result = true;
88    if (PrintGC && Verbose) {
89      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
90                    " full");
91    }
92  }
93  if (!result && should_allocate(size, is_tlab)) {
94    result = true;
95    if (PrintGC && Verbose) {
96      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
97                    " should_allocate(" SIZE_FORMAT ")",
98                    size);
99    }
100  }
101  // If we don't have very much free space.
102  // XXX: 10000 should be a percentage of the capacity!!!
103  if (!result && free() < 10000) {
104    result = true;
105    if (PrintGC && Verbose) {
106      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
107                    " free(): " SIZE_FORMAT,
108                    free());
109    }
110  }
111  // If we had to expand to accommodate promotions from younger generations
112  if (!result && _capacity_at_prologue < capacity()) {
113    result = true;
114    if (PrintGC && Verbose) {
115      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
116                    "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
117                    _capacity_at_prologue, capacity());
118    }
119  }
120  return result;
121}
122
123void TenuredGeneration::compute_new_size() {
124  assert_locked_or_safepoint(Heap_lock);
125
126  // Compute some numbers about the state of the heap.
127  const size_t used_after_gc = used();
128  const size_t capacity_after_gc = capacity();
129
130  CardGeneration::compute_new_size();
131
132  assert(used() == used_after_gc && used_after_gc <= capacity(),
133         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
134         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
135}
136
137void TenuredGeneration::update_gc_stats(Generation* current_generation,
138                                        bool full) {
139  // If the young generation has been collected, gather any statistics
140  // that are of interest at this point.
141  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
142  if (!full && current_is_young) {
143    // Calculate size of data promoted from the younger generations
144    // before doing the collection.
145    size_t used_before_gc = used();
146
147    // If the younger gen collections were skipped, then the
148    // number of promoted bytes will be 0 and adding it to the
149    // average will incorrectly lessen the average.  It is, however,
150    // also possible that no promotion was needed.
151    if (used_before_gc >= _used_at_prologue) {
152      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
153      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
154    }
155  }
156}
157
158void TenuredGeneration::update_counters() {
159  if (UsePerfData) {
160    _space_counters->update_all();
161    _gen_counters->update_all();
162  }
163}
164
165bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
166  size_t available = max_contiguous_available();
167  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
168  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
169  if (PrintGC && Verbose) {
170    gclog_or_tty->print_cr(
171      "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
172      "max_promo(" SIZE_FORMAT ")",
173      res? "":" not", available, res? ">=":"<",
174      av_promo, max_promotion_in_bytes);
175  }
176  return res;
177}
178
179void TenuredGeneration::collect(bool   full,
180                                bool   clear_all_soft_refs,
181                                size_t size,
182                                bool   is_tlab) {
183  GenCollectedHeap* gch = GenCollectedHeap::heap();
184
185  // Temporarily expand the span of our ref processor, so
186  // refs discovery is over the entire heap, not just this generation
187  ReferenceProcessorSpanMutator
188    x(ref_processor(), gch->reserved_region());
189
190  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
191  gc_timer->register_gc_start();
192
193  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
194  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
195
196  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
197
198  gc_timer->register_gc_end();
199
200  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
201}
202
203HeapWord*
204TenuredGeneration::expand_and_allocate(size_t word_size,
205                                       bool is_tlab,
206                                       bool parallel) {
207  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
208  if (parallel) {
209    MutexLocker x(ParGCRareEvent_lock);
210    HeapWord* result = NULL;
211    size_t byte_size = word_size * HeapWordSize;
212    while (true) {
213      expand(byte_size, _min_heap_delta_bytes);
214      if (GCExpandToAllocateDelayMillis > 0) {
215        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
216      }
217      result = _the_space->par_allocate(word_size);
218      if ( result != NULL) {
219        return result;
220      } else {
221        // If there's not enough expansion space available, give up.
222        if (_virtual_space.uncommitted_size() < byte_size) {
223          return NULL;
224        }
225        // else try again
226      }
227    }
228  } else {
229    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
230    return _the_space->allocate(word_size);
231  }
232}
233
// Grow the committed size of this generation; serialized on
// ExpandHeap_lock (via GCMutexLocker), then delegated to CardGeneration.
// Returns whether any expansion took place.
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
238
// Largest allocation that can be satisfied without triggering a GC:
// simply the free space in the one tenured space.
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}
242
// Contiguous space available for allocation: free space in the space
// plus the still-uncommitted part of the virtual space (which could be
// committed to extend it).
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}
246
// Locking protocol check used by CardGeneration when resizing: the caller
// must hold ExpandHeap_lock or be at a safepoint.
void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}
250
// Hook called before heap verification. Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}
253
// Apply the closure to every object in this generation by delegating to
// its single space.
void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
257
// Record the current allocation point so that objects allocated from now
// on can be visited by oop_since_save_marks_iterate*().
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}
261
// Reset the saved mark, forgetting any previously recorded allocation
// point.
void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}
265
// True iff nothing has been allocated in this generation since the last
// save_marks() (i.e. the saved mark is still at the space's top).
bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}
269
// Generate one oop_since_save_marks_iterate<nv_suffix>() method per
// closure type/suffix pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES.
// Each generated method points the closure at this generation, walks the
// oops of objects allocated since the last save_marks(), clears the
// closure's generation, and then advances the saved mark to the current
// top. (No comments inside the #define: they would break the backslash
// line continuations.)
#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)     \
                                                                                \
void TenuredGeneration::                                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

// Instantiate the method for every closure in the shared closure list.
ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
283
// Bookkeeping at the end of a collection: refresh the perf counters and,
// when heap mangling is enabled, check that the unused part of the space
// was completely mangled.
void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}
291
// Remember the space's current top as the boundary for unused-area
// mangling checks; only meaningful when ZapUnusedHeapArea is on.
void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}
296
// Verify this generation by verifying its single space.
void TenuredGeneration::verify() {
  _the_space->verify();
}
300
// Print a summary of this generation followed by its space ("the" is the
// space's label, continuing the line started by Generation::print_on).
void TenuredGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  _the_space->print_on(st);
}
306