psScavenge.cpp revision 9727:f944761a3ce3
/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// An object is alive for the purposes of reference processing if it is
// outside the young gen (and therefore not subject to this scavenge) or if
// it has already been forwarded (copied) during this scavenge.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

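// Keeps a referent alive during reference processing: if the referent still
// resides in a space that needs scavenging, it is copied or promoted via the
// promotion manager.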
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    assert((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

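// Drains the promotion manager's stacks, transitively scavenging everything
// reachable from the objects copied so far.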
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

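// Applied to every object in the young gen after a promotion failure:
// restores the default mark word of any object whose header still holds a
// forwarding pointer.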
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

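// GCTask wrapper that lets one GC worker run its share of the discovered
// reference processing work.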
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

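// GCTask wrapper that lets one GC worker run its share of the reference
// enqueueing work.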
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

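// Fans reference processing and enqueueing out over the GC task manager's
// active workers; steal tasks are added for load balancing and termination
// when the processing task marks oops alive.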
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap-specific policy for invoking a scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(active_workers,
                                        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart:  collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding on a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio is in use (implying that the
        // old generation size may have been limited because of them), we
        // should limit the young generation size using NewRatio so that it
        // follows the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that is the case. Also, update() will cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime(Debug, gc, phases) tm("Prune Scavenge Root Methods", &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved marks and clears
// the preserved mark and oop stacks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    log_trace(gc, ergo)("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

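// Policy check performed before each scavenge: skip the scavenge if to_space
// is not empty or if the expected promotion volume is unlikely to fit into
// the old generation's free space.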
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to promote unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  old_gen->free_in_bytes());
  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

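// Called once at startup, after argument parsing: establishes the initial
// tenuring threshold, the young generation boundary, the reference
// processor, and the cached card table and collector counters.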
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
           "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads,          // mt processing degree
                           true,                       // mt discovery
                           ParallelGCThreads,          // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());

  _counters = new CollectorCounters("PSScavenge", 0);
}
805