parallelScavengeHeap.cpp revision 9727:f944761a3ce3
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

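// Set up the heap: reserve the heap space, install the card table barrier
// set, create the adjoining young and old generations, the adaptive size
// policy and its counters, and the GCTaskManager used by the parallel
// collectors.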
jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // is still kept as the maximum size of the generation as the
  // boundaries currently stand, and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

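// Second phase of heap initialization: set up the scavenger (which needs the
// tenuring threshold), the full-collection implementation in use, and the
// promotion managers.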
void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

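// Refresh the performance counters for both generations, the metaspace and
// the compressed class space.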
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

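// Capacity and usage are simply the sums over the two generations.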
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

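// An estimate of the maximum usable capacity: the reserved size minus one
// survivor space, since to-space is kept empty between collections.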
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

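// Only objects in the young generation are moved by a scavenge (minor GC).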
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

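// Try to satisfy an allocation directly from the old gen without collecting:
// used when the request is too large for eden, when a GC is locked out, or
// (a bounded number of times) while a "death march" is in progress.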
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

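// Run a full collection with the collector selected by UseParallelOldGC.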
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy, so we do not check for the gc time
// limit being exceeded here; that is the responsibility of the heap-specific
// collection methods. This method decides where to attempt allocations and
// when to attempt collections, but applies no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

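// TLABs are carved out of eden only, so all TLAB queries delegate to the
// eden space.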
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

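// Iterate over all objects in the heap, young generation first.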
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

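// Find the start of the object containing addr. The old gen answers this via
// its block start array; for the young gen it is only reached from debugging
// and error reporting, and otherwise falls through to Unimplemented().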
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

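// Build a snapshot of the old and young virtual spaces and of the eden,
// from and to spaces, for use by GC tracing.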
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceOldGenTime) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

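// Accessor for the singleton heap, with sanity checks that the heap exists
// and really is a ParallelScavengeHeap, e.g.
//   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();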
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
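// Debug-only support for ZapUnusedHeapArea: remember the spaces' tops before
// a GC and mangle the unused parts of the spaces so that stray references
// into unused memory are easier to spot.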
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif