/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;
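
// Closure applied by reference processing to referents that must be kept
// alive: a referent still in the young generation (and not yet forwarded)
// is copied via the promotion manager, much as a strong root would be.
// The same reference slot may be visited more than once, so
// should_scavenge() filters out slots whose targets were already copied.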
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    assert(oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask& rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char*) "Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char*) "Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}
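
// Unlike processing above, the enqueue phase only links already-discovered
// references onto the pending list and copies no objects, so no terminator
// or StealTasks are needed: each worker simply handles its own sub-list.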
void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or exhibit any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
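// In outline, a scavenge proceeds through parallel root scanning and
// evacuation of live young objects, reference processing, string table
// scrubbing, and (on success) adaptive resizing of the young generation.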
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();
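
      // Old-to-young pointers are found by scanning the old gen's card
      // table; each OldToYoungRootsTask below takes one stripe of that
      // scan, so the work divides evenly among the active workers.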
      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      // If active_workers can exceed 1, add a StealTask.
      // PSPromotionManager::drain_stacks_depth() does not fully drain its
      // stacks and expects a StealTask to complete the draining if
      // ParallelGCThreads is > 1.
      if (gc_task_manager()->workers() > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &pt);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &pt);
      }

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor, &pt);
      } else {
        reference_processor()->enqueue_discovered_references(NULL, &pt);
      }

      pt.print_enqueue_phase();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding on a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }
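
        // For scale (illustrative numbers only): with a 64M young gen and
        // two 8M survivor spaces, max_eden_size below works out to
        // 64M - 8M - 8M = 48M, the most eden can grow to without
        // shrinking the survivor spaces.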

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc */);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc */,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc */);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// removing all forwarding references. It then restores any preserved marks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  RemoveForwardedPointerClosure remove_fwd_ptr_closure;
  young_gen->object_iterate(&remove_fwd_ptr_closure);

  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to promote unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();
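
  // Worked example (illustrative numbers only): if the padded average
  // promotion is 12M but only 8M is live in the young gen, at most 8M can
  // be promoted, so the scavenge is attempted whenever the old gen has
  // more than 8M free.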
"Do" : "Skip", (size_t) policy->average_promoted_in_bytes(), 697 (size_t) policy->padded_average_promoted_in_bytes(), 698 old_gen->free_in_bytes()); 699 if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) { 700 log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); 701 } 702 703 if (result) { 704 _consecutive_skipped_scavenges = 0; 705 } else { 706 _consecutive_skipped_scavenges++; 707 if (UsePerfData) { 708 counters->update_scavenge_skipped(promoted_too_large); 709 } 710 } 711 return result; 712} 713 714 // Used to add tasks 715GCTaskManager* const PSScavenge::gc_task_manager() { 716 assert(ParallelScavengeHeap::gc_task_manager() != NULL, 717 "shouldn't return NULL"); 718 return ParallelScavengeHeap::gc_task_manager(); 719} 720 721// Adaptive size policy support. When the young generation/old generation 722// boundary moves, _young_generation_boundary must be reset 723void PSScavenge::set_young_generation_boundary(HeapWord* v) { 724 _young_generation_boundary = v; 725 if (UseCompressedOops) { 726 _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v); 727 } 728} 729 730void PSScavenge::initialize() { 731 // Arguments must have been parsed 732 733 if (AlwaysTenure || NeverTenure) { 734 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1, 735 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold); 736 _tenuring_threshold = MaxTenuringThreshold; 737 } else { 738 // We want to smooth out our startup times for the AdaptiveSizePolicy 739 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : 740 MaxTenuringThreshold; 741 } 742 743 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); 744 PSYoungGen* young_gen = heap->young_gen(); 745 PSOldGen* old_gen = heap->old_gen(); 746 747 // Set boundary between young_gen and old_gen 748 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), 749 "old above young"); 750 set_young_generation_boundary(young_gen->eden_space()->bottom()); 751 752 // Initialize ref handling object for scavenging. 753 MemRegion mr = young_gen->reserved(); 754 755 _ref_processor = 756 new ReferenceProcessor(mr, // span 757 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 758 ParallelGCThreads, // mt processing degree 759 true, // mt discovery 760 ParallelGCThreads, // mt discovery degree 761 true, // atomic_discovery 762 NULL); // header provides liveness info 763 764 // Cache the cardtable 765 _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set()); 766 767 _counters = new CollectorCounters("PSScavenge", 0); 768} 769