/*
 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PreservedMarksSet*             PSPromotionManager::_preserved_marks_set = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;

void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  const uint promotion_manager_num = ParallelGCThreads + 1;

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.

  assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
  _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
  guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set");
  _preserved_marks_set->init(promotion_manager_num);
  for (uint i = 0; i < promotion_manager_num; i += 1) {
    _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
  }
}

// Helper functions to get around the circular dependency between
// psScavenge.inline.hpp and psPromotionManager.inline.hpp.
bool PSPromotionManager::should_scavenge(oop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}
bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}

void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _preserved_marks_set->assert_empty();
  _young_space = heap->young_gen()->to_space();

  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->reset();
  }
}

bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  if (!promotion_failure_occurred) {
    // If there was no promotion failure, the preserved mark stacks
    // should be empty.
    _preserved_marks_set->assert_empty();
  }
  return promotion_failure_occurred;
}

#if TASKQUEUE_STATS
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_taskqueue_stats() {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* out = log.trace_stream();
  out->print_cr("== GC Tasks Stats, GC %3d",
                ParallelScavengeHeap::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3d ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  _preserved_marks = NULL;

  reset();
}

void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Do not prefill the LABs; this avoids wasting heap.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}

void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_marks) {
  assert(_preserved_marks == NULL, "do not set it twice");
  _preserved_marks = preserved_marks;
}

class ParRestoreGCTask : public GCTask {
private:
  const uint _id;
  PreservedMarksSet* const _preserved_marks_set;
  volatile size_t* const _total_size_addr;

public:
  virtual char* name() {
    return (char*) "preserved mark restoration task";
  }

  virtual void do_it(GCTaskManager* manager, uint which) {
    _preserved_marks_set->get(_id)->restore_and_increment(_total_size_addr);
  }

  ParRestoreGCTask(uint id,
                   PreservedMarksSet* preserved_marks_set,
                   volatile size_t* total_size_addr)
    : _id(id),
      _preserved_marks_set(preserved_marks_set),
      _total_size_addr(total_size_addr) { }
};

class PSRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
private:
  GCTaskManager* _gc_task_manager;

public:
  PSRestorePreservedMarksTaskExecutor(GCTaskManager* gc_task_manager)
      : _gc_task_manager(gc_task_manager) { }

  void restore(PreservedMarksSet* preserved_marks_set,
               volatile size_t* total_size_addr) {
    // GCTask / GCTaskQueue are ResourceObjs
    ResourceMark rm;

    GCTaskQueue* q = GCTaskQueue::create();
    for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
      q->enqueue(new ParRestoreGCTask(i, preserved_marks_set, total_size_addr));
    }
    _gc_task_manager->execute_and_wait(q);
  }
};

void PSPromotionManager::restore_preserved_marks() {
  PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
  _preserved_marks_set->restore(&task_executor);
}

void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

class PushContentsClosure : public ExtendedOopClosure {
  PSPromotionManager* _pm;
 public:
  PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}

  template <typename T> void do_oop_nv(T* p) {
    if (PSScavenge::should_scavenge(p)) {
      _pm->claim_or_forward_depth(p);
    }
  }

  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

  // Don't use the oop verification code in the oop_oop_iterate framework.
  debug_only(virtual bool should_verify_oops() { return false; })
};

void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  PushContentsClosure cl(pm);
  oop_oop_iterate_oop_maps_reverse<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  // Note that we don't have to follow the mirror -> klass pointer, since all
  // klasses that are dirty will be scavenged when we iterate over the
  // ClassLoaderData objects.

  InstanceKlass::oop_ps_push_contents(obj, pm);

  PushContentsClosure cl(pm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  InstanceKlass::oop_ps_push_contents(obj, pm);

  // This is called by the young collector. It will already have taken care of
  // all class loader data. So, we don't have to follow the class loader ->
  // class loader data link.
}

template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference already enqueued, referent and next will be traversed later
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  T  next_oop = oopDesc::load_heap_oop(next_addr);
  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
    log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
    if (PSScavenge::should_scavenge(discovered_addr)) {
      pm->claim_or_forward_depth(discovered_addr);
    }
  }
  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
  }
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}

void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  if (UseCompressedOops) {
    oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
  } else {
    oop_ps_push_contents_specialized<oop>(obj, this, pm);
  }
}

void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_objArray(), "obj must be obj array");
  PushContentsClosure cl(pm);
  oop_oop_iterate_elements<true>(objArrayOop(obj), &cl);
}

void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_typeArray(), "must be a type array");
  ShouldNotReachHere();
}

oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    push_contents(obj);

    _preserved_marks->push_if_necessary(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

  log_develop_trace(gc, scavenge)("{promotion-failure %s " PTR_FORMAT " (%d)}", obj->klass()->internal_name(), p2i(obj), obj->size());

  return obj;
}