psScavenge.cpp revision 8413:92457dfb91bd
/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// The is-alive predicate used during reference processing: objects outside
// the young generation are trivially alive; a young object is alive only if
// it has been forwarded (i.e. copied) during this scavenge.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

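// Keep-alive closure handed to the reference processor: a referent that still
// needs scavenging is copied and pushed through the promotion manager, just
// like an oop discovered via a strong root.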
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

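// Drains the promotion manager's stacks, transitively evacuating everything
// reachable from objects that have already been copied; serves as the
// "complete collection" closure during reference processing.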
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

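// After a promotion failure, objects left in the young generation may still
// carry forwarding pointers in their headers; reset those headers to the
// default mark word (preserved marks are restored separately).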
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

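// GCTask wrapper that runs one worker's share of the reference processor's
// parallel ProcessTask on the GC task manager's threads.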
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

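// GCTask wrapper for the reference processor's parallel EnqueueTask, which
// links discovered references onto their pending lists.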
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

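// Executes reference-processing work on the GC task manager, one proxy task
// per active worker. Steal tasks are added only when the processing task
// marks oops alive, since only that kind of work can be load-balanced.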
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

251
252// This method contains no policy. You should probably
253// be calling invoke() instead.
254bool PSScavenge::invoke_no_policy() {
255  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
256  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
257
258  assert(_preserved_mark_stack.is_empty(), "should be empty");
259  assert(_preserved_oop_stack.is_empty(), "should be empty");
260
261  _gc_timer.register_gc_start();
262
263  TimeStamp scavenge_entry;
264  TimeStamp scavenge_midpoint;
265  TimeStamp scavenge_exit;
266
267  scavenge_entry.update();
268
269  if (GC_locker::check_active_before_gc()) {
270    return false;
271  }
272
273  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
274  GCCause::Cause gc_cause = heap->gc_cause();
275
276  // Check for potential problems.
277  if (!should_attempt_scavenge()) {
278    return false;
279  }
280
281  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
282
283  bool promotion_failure_occurred = false;
284
285  PSYoungGen* young_gen = heap->young_gen();
286  PSOldGen* old_gen = heap->old_gen();
287  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
288
289  heap->increment_total_collections();
290
291  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
292
293  if ((gc_cause != GCCause::_java_lang_system_gc) ||
294       UseAdaptiveSizePolicyWithSystemGC) {
295    // Gather the feedback data for eden occupancy.
296    young_gen->eden_space()->accumulate_statistics();
297  }
298
299  if (ZapUnusedHeapArea) {
300    // Save information needed to minimize mangling
301    heap->record_gen_tops_before_GC();
302  }
303
304  heap->print_heap_before_gc();
305  heap->trace_heap_before_gc(&_gc_tracer);
306
307  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
308  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
309
310  size_t prev_used = heap->used();
311
312  // Fill in TLABs
313  heap->accumulate_statistics_all_tlabs();
314  heap->ensure_parsability(true);  // retire TLABs
315
316  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
317    HandleMark hm;  // Discard invalid handles created during verification
318    Universe::verify(" VerifyBeforeGC:");
319  }
320
  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

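    // With C2, compiled frames may hold derived pointers (addresses computed
    // from a base oop); clear the table that tracks them before any oops move.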
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

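      // Work stealing: once a worker drains its own queue it steals from its
      // peers; the terminator lets the workers agree that every queue is
      // empty before the parallel phase ends.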
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer, _gc_tracer.gc_id());
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC overhead limit count,
      // which is used for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
              " young_gen_capacity: " SIZE_FORMAT,
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding on a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them), we
        // limit the young generation size via NewRatio so that it follows
        // the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

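        // Feed this cycle's survivor occupancy and overflow state back into
        // the size policy to choose the next survivor size and tenuring
        // threshold.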
        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u"
                                 " (max threshold " UINTX_FORMAT ")",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Should this be done at minor collections?
        // Don't check whether the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout, so make sure eden is reshaped
      // if that is the case. update() also performs adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

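// Heuristic run before each scavenge: skip it (letting the caller fall back
// to a full GC) when to_space is not empty, or when the padded average of
// promoted bytes suggests the old generation cannot absorb this cycle's
// promotions.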
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Returns the GCTaskManager used to add and run scavenge tasks.
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
        err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold));
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());

  _counters = new CollectorCounters("PSScavenge", 0);
}