/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Set up the generations.
  //
  // The maximum size that a generation can grow to includes growth into
  // the other generation.  Note that _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand, and is still
  // used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

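// Update the performance counters for the young and old generations, the
// metaspace, and the compressed class space.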
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


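// An estimate of the largest amount of memory that can ever be used for
// objects: the reserved heap size minus the space set aside for a survivor
// space (only one survivor space holds objects at a time), but never less
// than the current capacity.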
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

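// An object can be moved by a scavenge (minor collection) only if it is in
// the young generation.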
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here, and reset it to true only if the GC time limit is exceeded,
  // as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section, so
        // we retry the allocation sequence from the beginning of the loop
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the GC time limit has been exceeded.
        // The allocation must have failed above ("result" guarding this
        // path is NULL) and the most recent collection must have exceeded
        // the GC overhead limit (although enough may have been collected
        // to satisfy the allocation).  Exit the loop so that an
        // out-of-memory error will be thrown (return NULL, ignoring the
        // contents of op.result()), but clear gc_overhead_limit_exceeded
        // so that the next collection starts with a clean slate (i.e.,
        // forgets about previous overhead excesses).  Fill op.result()
        // with a filler object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

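// Perform a full collection of the entire heap, using either the parallel
// compacting collector (UseParallelOldGC) or PSMarkSweep.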
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method contains policy for the
// allocation flow, NOT collection policy. We do not check here whether the
// GC time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

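// Make the heap walkable: retire TLABs (done by the superclass) and give
// eden a chance to make its unallocated area parsable.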
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

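// TLABs are allocated out of eden; returns NULL if eden cannot satisfy the
// request.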
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


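// Return the start of the block (object) containing addr.  Only old gen
// addresses are supported, via the object start array; for young gen
// addresses this is unimplemented except during error reporting, where NULL
// is returned.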
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

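// addr must be the start of an object; the size is read from the object
// itself.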
size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

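// Delegate to the full-GC implementation in use, which records the time of
// the last collection.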
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

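// Apply the closure to each GC worker thread owned by the GCTaskManager.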
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceOldGenTime) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }

  AdaptiveSizePolicyOutput::print();
}


void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

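// Return the single ParallelScavengeHeap instance, asserting that the heap
// has been initialized and is of the expected kind.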
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif