// psOldGen.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

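// This variant only records the generation sizes. Callers that use it
// (e.g. ASPSOldGen when UseAdaptiveGCBoundary is set) are expected to set
// up the virtual space themselves and call initialize() afterwards.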
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

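// One-time wiring of the structures that hang off the old gen. The object
// start array is sized for the full reserved region up front (the
// generation can grow into it), while the card table and the object space
// only cover the currently committed region and are resized later via
// post_resize().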
void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
    heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
    "word vs bytes confusion");

  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

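  // The card table's covered region must track the committed region; it is
  // resized again in post_resize() whenever the generation grows or shrinks.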
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  BarrierSet* bs = heap->barrier_set();

  bs->resize_covered_region(cmr);

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

  // Verify that the start and end of this generation are each the start of
  // a card. If this weren't true, a single card could span more than one
  // generation, which would cause problems when we commit/uncommit memory,
  // and when we clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL) {
    vm_exit_during_initialization("Could not allocate an old gen space");
  }

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation of old generation");
  }

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now precompact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

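// The expand_and_* variants grow the committed size first and then retry
// the corresponding no-expand allocation path (mutex-based here, CAS-based
// below). GCExpandToAllocateDelayMillis (normally 0) inserts an artificial
// pause between the expansion and the retry, presumably to widen race
// windows during testing.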
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

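// Grow the committed size by at least "bytes". We first try the larger
// "chunk" size (MinHeapDeltaBytes, or a page per lgroup under NUMA),
// then fall back to the exact aligned request, and finally to whatever
// uncommitted reserve is left.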
void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
        "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

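// Commit whatever remains of the reserved space, if anything.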
bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

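// Uncommit memory at the high end of the generation. Unlike expand(),
// this expects the caller to hold ExpandHeap_lock already (resize()
// takes it before calling us), and only whole alignment units are
// given back.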
void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

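// Recompute the committed size so that free space is close to
// desired_free_space: the target is used_in_bytes() + desired_free_space,
// clamped to [min_gen_size(), gen_size_limit()] and rounded up to the
// space alignment, then we expand or shrink toward it.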
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink() expects ExpandHeap_lock to be held by the caller;
    // expand() takes the lock itself.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a MemRegion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
    (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
    "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

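// The remaining operations only make sense when the boundary between the
// generations can move (UseAdaptiveGCBoundary); the ASPSOldGen subclass is
// the one expected to implement them, so reaching these bodies is a bug.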
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(virtual_space()->low_boundary()),
                p2i(virtual_space()->high()),
                p2i(virtual_space()->high_boundary()));

  st->print("  object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
      name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

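// Debug-only consistency checks tying together the object space, the
// virtual space, and the cached _reserved region.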
void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
    "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
    "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
    "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
    "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}

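// Checks that, for every object in the old gen, the ObjectStartArray can
// map an interior address (one word past the object's start) back to the
// start of the object, and that the start is marked as an allocated block.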
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif