// parNewGeneration.cpp -- revision 10979:89883d363528
1/* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25#include "precompiled.hpp" 26#include "gc/cms/compactibleFreeListSpace.hpp" 27#include "gc/cms/concurrentMarkSweepGeneration.hpp" 28#include "gc/cms/parNewGeneration.inline.hpp" 29#include "gc/cms/parOopClosures.inline.hpp" 30#include "gc/serial/defNewGeneration.inline.hpp" 31#include "gc/shared/adaptiveSizePolicy.hpp" 32#include "gc/shared/ageTable.inline.hpp" 33#include "gc/shared/copyFailedInfo.hpp" 34#include "gc/shared/gcHeapSummary.hpp" 35#include "gc/shared/gcTimer.hpp" 36#include "gc/shared/gcTrace.hpp" 37#include "gc/shared/gcTraceTime.inline.hpp" 38#include "gc/shared/genCollectedHeap.hpp" 39#include "gc/shared/genOopClosures.inline.hpp" 40#include "gc/shared/generation.hpp" 41#include "gc/shared/plab.inline.hpp" 42#include "gc/shared/preservedMarks.inline.hpp" 43#include "gc/shared/referencePolicy.hpp" 44#include "gc/shared/space.hpp" 45#include "gc/shared/spaceDecorator.hpp" 46#include "gc/shared/strongRootsScope.hpp" 47#include "gc/shared/taskqueue.inline.hpp" 48#include "gc/shared/workgroup.hpp" 49#include "logging/log.hpp" 50#include "memory/resourceArea.hpp" 51#include "oops/objArrayOop.hpp" 52#include "oops/oop.inline.hpp" 53#include "runtime/atomic.inline.hpp" 54#include "runtime/handles.hpp" 55#include "runtime/handles.inline.hpp" 56#include "runtime/java.hpp" 57#include "runtime/thread.inline.hpp" 58#include "utilities/copy.hpp" 59#include "utilities/globalDefinitions.hpp" 60#include "utilities/stack.inline.hpp" 61 62ParScanThreadState::ParScanThreadState(Space* to_space_, 63 ParNewGeneration* young_gen_, 64 Generation* old_gen_, 65 int thread_num_, 66 ObjToScanQueueSet* work_queue_set_, 67 Stack<oop, mtGC>* overflow_stacks_, 68 PreservedMarks* preserved_marks_, 69 size_t desired_plab_sz_, 70 ParallelTaskTerminator& term_) : 71 _to_space(to_space_), 72 _old_gen(old_gen_), 73 _young_gen(young_gen_), 74 _thread_num(thread_num_), 75 _work_queue(work_queue_set_->queue(thread_num_)), 76 _to_space_full(false), 77 _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), 78 _preserved_marks(preserved_marks_), 79 _ageTable(false), // false ==> not the global age table, no perf data. 
80 _to_space_alloc_buffer(desired_plab_sz_), 81 _to_space_closure(young_gen_, this), 82 _old_gen_closure(young_gen_, this), 83 _to_space_root_closure(young_gen_, this), 84 _old_gen_root_closure(young_gen_, this), 85 _older_gen_closure(young_gen_, this), 86 _evacuate_followers(this, &_to_space_closure, &_old_gen_closure, 87 &_to_space_root_closure, young_gen_, &_old_gen_root_closure, 88 work_queue_set_, &term_), 89 _is_alive_closure(young_gen_), 90 _scan_weak_ref_closure(young_gen_, this), 91 _keep_alive_closure(&_scan_weak_ref_closure), 92 _strong_roots_time(0.0), 93 _term_time(0.0) 94{ 95 #if TASKQUEUE_STATS 96 _term_attempts = 0; 97 _overflow_refills = 0; 98 _overflow_refill_objs = 0; 99 #endif // TASKQUEUE_STATS 100 101 _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num()); 102 _hash_seed = 17; // Might want to take time-based random value. 103 _start = os::elapsedTime(); 104 _old_gen_closure.set_generation(old_gen_); 105 _old_gen_root_closure.set_generation(old_gen_); 106} 107 108void ParScanThreadState::record_survivor_plab(HeapWord* plab_start, 109 size_t plab_word_size) { 110 ChunkArray* sca = survivor_chunk_array(); 111 if (sca != NULL) { 112 // A non-null SCA implies that we want the PLAB data recorded. 113 sca->record_sample(plab_start, plab_word_size); 114 } 115} 116 117bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const { 118 return new_obj->is_objArray() && 119 arrayOop(new_obj)->length() > ParGCArrayScanChunk && 120 new_obj != old_obj; 121} 122 123void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) { 124 assert(old->is_objArray(), "must be obj array"); 125 assert(old->is_forwarded(), "must be forwarded"); 126 assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap."); 127 assert(!old_gen()->is_in(old), "must be in young generation."); 128 129 objArrayOop obj = objArrayOop(old->forwardee()); 130 // Process ParGCArrayScanChunk elements now 131 // and push the remainder back onto queue 132 int start = arrayOop(old)->length(); 133 int end = obj->length(); 134 int remainder = end - start; 135 assert(start <= end, "just checking"); 136 if (remainder > 2 * ParGCArrayScanChunk) { 137 // Test above combines last partial chunk with a full chunk 138 end = start + ParGCArrayScanChunk; 139 arrayOop(old)->set_length(end); 140 // Push remainder. 141 bool ok = work_queue()->push(old); 142 assert(ok, "just popped, push must be okay"); 143 } else { 144 // Restore length so that it can be used if there 145 // is a promotion failure and forwarding pointers 146 // must be removed. 
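    // (While an objArray is being scanned in chunks, the length field of the
    // from-space copy doubles as the index of the next element to scan; the
    // true length is only available from the forwardee, which is why it must
    // be restored here so promotion-failure cleanup sees the real length.)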
147 arrayOop(old)->set_length(end); 148 } 149 150 // process our set of indices (include header in first chunk) 151 // should make sure end is even (aligned to HeapWord in case of compressed oops) 152 if ((HeapWord *)obj < young_old_boundary()) { 153 // object is in to_space 154 obj->oop_iterate_range(&_to_space_closure, start, end); 155 } else { 156 // object is in old generation 157 obj->oop_iterate_range(&_old_gen_closure, start, end); 158 } 159} 160 161void ParScanThreadState::trim_queues(int max_size) { 162 ObjToScanQueue* queue = work_queue(); 163 do { 164 while (queue->size() > (juint)max_size) { 165 oop obj_to_scan; 166 if (queue->pop_local(obj_to_scan)) { 167 if ((HeapWord *)obj_to_scan < young_old_boundary()) { 168 if (obj_to_scan->is_objArray() && 169 obj_to_scan->is_forwarded() && 170 obj_to_scan->forwardee() != obj_to_scan) { 171 scan_partial_array_and_push_remainder(obj_to_scan); 172 } else { 173 // object is in to_space 174 obj_to_scan->oop_iterate(&_to_space_closure); 175 } 176 } else { 177 // object is in old generation 178 obj_to_scan->oop_iterate(&_old_gen_closure); 179 } 180 } 181 } 182 // For the case of compressed oops, we have a private, non-shared 183 // overflow stack, so we eagerly drain it so as to more evenly 184 // distribute load early. Note: this may be good to do in 185 // general rather than delay for the final stealing phase. 186 // If applicable, we'll transfer a set of objects over to our 187 // work queue, allowing them to be stolen and draining our 188 // private overflow stack. 189 } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this)); 190} 191 192bool ParScanThreadState::take_from_overflow_stack() { 193 assert(ParGCUseLocalOverflow, "Else should not call"); 194 assert(young_gen()->overflow_list() == NULL, "Error"); 195 ObjToScanQueue* queue = work_queue(); 196 Stack<oop, mtGC>* const of_stack = overflow_stack(); 197 const size_t num_overflow_elems = of_stack->size(); 198 const size_t space_available = queue->max_elems() - queue->size(); 199 const size_t num_take_elems = MIN3(space_available / 4, 200 ParGCDesiredObjsFromOverflowList, 201 num_overflow_elems); 202 // Transfer the most recent num_take_elems from the overflow 203 // stack to our work queue. 204 for (size_t i = 0; i != num_take_elems; i++) { 205 oop cur = of_stack->pop(); 206 oop obj_to_push = cur->forwardee(); 207 assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap"); 208 assert(!old_gen()->is_in_reserved(cur), "Should be in young gen"); 209 assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap"); 210 if (should_be_partially_scanned(obj_to_push, cur)) { 211 assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); 212 obj_to_push = cur; 213 } 214 bool ok = queue->push(obj_to_push); 215 assert(ok, "Should have succeeded"); 216 } 217 assert(young_gen()->overflow_list() == NULL, "Error"); 218 return num_take_elems > 0; // was something transferred? 219} 220 221void ParScanThreadState::push_on_overflow_stack(oop p) { 222 assert(ParGCUseLocalOverflow, "Else should not call"); 223 overflow_stack()->push(p); 224 assert(young_gen()->overflow_list() == NULL, "Error"); 225} 226 227HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { 228 // If the object is small enough, try to reallocate the buffer. 
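  // Refill policy (a rough sketch of the test below): a new PLAB is started
  // only when the request is small relative to the buffer, i.e. when
  //   word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()
  // For example, assuming ParallelGCBufferWastePct == 10 and a 4096-word
  // PLAB, requests of fewer than ~410 words retire the old buffer and start
  // a fresh one; larger requests fall through to a direct par_allocate() in
  // to-space, leaving the current PLAB in place for later small requests.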
229 HeapWord* obj = NULL; 230 if (!_to_space_full) { 231 PLAB* const plab = to_space_alloc_buffer(); 232 Space* const sp = to_space(); 233 if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) { 234 // Is small enough; abandon this buffer and start a new one. 235 plab->retire(); 236 // The minimum size has to be twice SurvivorAlignmentInBytes to 237 // allow for padding used in the alignment of 1 word. A padding 238 // of 1 is too small for a filler word so the padding size will 239 // be increased by SurvivorAlignmentInBytes. 240 size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize); 241 size_t buf_size = MAX2(plab->word_sz(), min_usable_size); 242 HeapWord* buf_space = sp->par_allocate(buf_size); 243 if (buf_space == NULL) { 244 const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize; 245 size_t free_bytes = sp->free(); 246 while(buf_space == NULL && free_bytes >= min_bytes) { 247 buf_size = free_bytes >> LogHeapWordSize; 248 assert(buf_size == (size_t)align_object_size(buf_size), "Invariant"); 249 buf_space = sp->par_allocate(buf_size); 250 free_bytes = sp->free(); 251 } 252 } 253 if (buf_space != NULL) { 254 plab->set_buf(buf_space, buf_size); 255 record_survivor_plab(buf_space, buf_size); 256 obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes); 257 // Note that we cannot compare buf_size < word_sz below 258 // because of AlignmentReserve (see PLAB::allocate()). 259 assert(obj != NULL || plab->words_remaining() < word_sz, 260 "Else should have been able to allocate requested object size " 261 SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes " 262 SIZE_FORMAT ", words_remaining " SIZE_FORMAT, 263 word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining()); 264 // It's conceivable that we may be able to use the 265 // buffer we just grabbed for subsequent small requests 266 // even if not for this one. 267 } else { 268 // We're used up. 269 _to_space_full = true; 270 } 271 } else { 272 // Too large; allocate the object individually. 
273 obj = sp->par_allocate(word_sz); 274 } 275 } 276 return obj; 277} 278 279void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) { 280 to_space_alloc_buffer()->undo_allocation(obj, word_sz); 281} 282 283void ParScanThreadState::print_promotion_failure_size() { 284 if (_promotion_failed_info.has_failed()) { 285 log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ", 286 _thread_num, _promotion_failed_info.first_size()); 287 } 288} 289 290class ParScanThreadStateSet: StackObj { 291public: 292 // Initializes states for the specified number of threads; 293 ParScanThreadStateSet(int num_threads, 294 Space& to_space, 295 ParNewGeneration& young_gen, 296 Generation& old_gen, 297 ObjToScanQueueSet& queue_set, 298 Stack<oop, mtGC>* overflow_stacks_, 299 PreservedMarksSet& preserved_marks_set, 300 size_t desired_plab_sz, 301 ParallelTaskTerminator& term); 302 303 ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); } 304 305 inline ParScanThreadState& thread_state(int i); 306 307 void trace_promotion_failed(const YoungGCTracer* gc_tracer); 308 void reset(uint active_workers, bool promotion_failed); 309 void flush(); 310 311 #if TASKQUEUE_STATS 312 static void 313 print_termination_stats_hdr(outputStream* const st); 314 void print_termination_stats(); 315 static void 316 print_taskqueue_stats_hdr(outputStream* const st); 317 void print_taskqueue_stats(); 318 void reset_stats(); 319 #endif // TASKQUEUE_STATS 320 321private: 322 ParallelTaskTerminator& _term; 323 ParNewGeneration& _young_gen; 324 Generation& _old_gen; 325 ParScanThreadState* _per_thread_states; 326 const int _num_threads; 327 public: 328 bool is_valid(int id) const { return id < _num_threads; } 329 ParallelTaskTerminator* terminator() { return &_term; } 330}; 331 332ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, 333 Space& to_space, 334 ParNewGeneration& young_gen, 335 Generation& old_gen, 336 ObjToScanQueueSet& queue_set, 337 Stack<oop, mtGC>* overflow_stacks, 338 PreservedMarksSet& preserved_marks_set, 339 size_t desired_plab_sz, 340 ParallelTaskTerminator& term) 341 : _young_gen(young_gen), 342 _old_gen(old_gen), 343 _term(term), 344 _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)), 345 _num_threads(num_threads) 346{ 347 assert(num_threads > 0, "sanity check!"); 348 assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), 349 "overflow_stack allocation mismatch"); 350 // Initialize states. 
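  // The per-thread states are constructed with placement new into the
  // resource-allocated array above, so they are released with the enclosing
  // resource area rather than deleted individually.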
351 for (int i = 0; i < num_threads; ++i) { 352 new(_per_thread_states + i) 353 ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set, 354 overflow_stacks, preserved_marks_set.get(i), 355 desired_plab_sz, term); 356 } 357} 358 359inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) { 360 assert(i >= 0 && i < _num_threads, "sanity check!"); 361 return _per_thread_states[i]; 362} 363 364void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) { 365 for (int i = 0; i < _num_threads; ++i) { 366 if (thread_state(i).promotion_failed()) { 367 gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info()); 368 thread_state(i).promotion_failed_info().reset(); 369 } 370 } 371} 372 373void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) { 374 _term.reset_for_reuse(active_threads); 375 if (promotion_failed) { 376 for (int i = 0; i < _num_threads; ++i) { 377 thread_state(i).print_promotion_failure_size(); 378 } 379 } 380} 381 382#if TASKQUEUE_STATS 383void ParScanThreadState::reset_stats() { 384 taskqueue_stats().reset(); 385 _term_attempts = 0; 386 _overflow_refills = 0; 387 _overflow_refill_objs = 0; 388} 389 390void ParScanThreadStateSet::reset_stats() { 391 for (int i = 0; i < _num_threads; ++i) { 392 thread_state(i).reset_stats(); 393 } 394} 395 396void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) { 397 st->print_raw_cr("GC Termination Stats"); 398 st->print_raw_cr(" elapsed --strong roots-- -------termination-------"); 399 st->print_raw_cr("thr ms ms % ms % attempts"); 400 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"); 401} 402 403void ParScanThreadStateSet::print_termination_stats() { 404 Log(gc, task, stats) log; 405 if (!log.is_debug()) { 406 return; 407 } 408 409 ResourceMark rm; 410 outputStream* st = log.debug_stream(); 411 412 print_termination_stats_hdr(st); 413 414 for (int i = 0; i < _num_threads; ++i) { 415 const ParScanThreadState & pss = thread_state(i); 416 const double elapsed_ms = pss.elapsed_time() * 1000.0; 417 const double s_roots_ms = pss.strong_roots_time() * 1000.0; 418 const double term_ms = pss.term_time() * 1000.0; 419 st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8), 420 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, 421 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts()); 422 } 423} 424 425// Print stats related to work queue activity. 
426void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) { 427 st->print_raw_cr("GC Task Stats"); 428 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); 429 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); 430} 431 432void ParScanThreadStateSet::print_taskqueue_stats() { 433 if (!log_develop_is_enabled(Trace, gc, task, stats)) { 434 return; 435 } 436 Log(gc, task, stats) log; 437 ResourceMark rm; 438 outputStream* st = log.trace_stream(); 439 print_taskqueue_stats_hdr(st); 440 441 TaskQueueStats totals; 442 for (int i = 0; i < _num_threads; ++i) { 443 const ParScanThreadState & pss = thread_state(i); 444 const TaskQueueStats & stats = pss.taskqueue_stats(); 445 st->print("%3d ", i); stats.print(st); st->cr(); 446 totals += stats; 447 448 if (pss.overflow_refills() > 0) { 449 st->print_cr(" " SIZE_FORMAT_W(10) " overflow refills " 450 SIZE_FORMAT_W(10) " overflow objects", 451 pss.overflow_refills(), pss.overflow_refill_objs()); 452 } 453 } 454 st->print("tot "); totals.print(st); st->cr(); 455 456 DEBUG_ONLY(totals.verify()); 457} 458#endif // TASKQUEUE_STATS 459 460void ParScanThreadStateSet::flush() { 461 // Work in this loop should be kept as lightweight as 462 // possible since this might otherwise become a bottleneck 463 // to scaling. Should we add heavy-weight work into this 464 // loop, consider parallelizing the loop into the worker threads. 465 for (int i = 0; i < _num_threads; ++i) { 466 ParScanThreadState& par_scan_state = thread_state(i); 467 468 // Flush stats related to To-space PLAB activity and 469 // retire the last buffer. 470 par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats()); 471 472 // Every thread has its own age table. We need to merge 473 // them all into one. 474 AgeTable *local_table = par_scan_state.age_table(); 475 _young_gen.age_table()->merge(local_table); 476 477 // Inform old gen that we're done. 478 _old_gen.par_promote_alloc_done(i); 479 _old_gen.par_oop_since_save_marks_iterate_done(i); 480 } 481 482 if (UseConcMarkSweepGC) { 483 // We need to call this even when ResizeOldPLAB is disabled 484 // so as to avoid breaking some asserts. While we may be able 485 // to avoid this by reorganizing the code a bit, I am loathe 486 // to do that unless we find cases where ergo leads to bad 487 // performance. 
488 CompactibleFreeListSpaceLAB::compute_desired_plab_size(); 489 } 490} 491 492ParScanClosure::ParScanClosure(ParNewGeneration* g, 493 ParScanThreadState* par_scan_state) : 494 OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) { 495 _boundary = _g->reserved().end(); 496} 497 498void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); } 499void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); } 500 501void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); } 502void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); } 503 504void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); } 505void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); } 506 507void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); } 508void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); } 509 510ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g, 511 ParScanThreadState* par_scan_state) 512 : ScanWeakRefClosure(g), _par_scan_state(par_scan_state) 513{} 514 515void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } 516void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); } 517 518#ifdef WIN32 519#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ 520#endif 521 522ParEvacuateFollowersClosure::ParEvacuateFollowersClosure( 523 ParScanThreadState* par_scan_state_, 524 ParScanWithoutBarrierClosure* to_space_closure_, 525 ParScanWithBarrierClosure* old_gen_closure_, 526 ParRootScanWithoutBarrierClosure* to_space_root_closure_, 527 ParNewGeneration* par_gen_, 528 ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_, 529 ObjToScanQueueSet* task_queues_, 530 ParallelTaskTerminator* terminator_) : 531 532 _par_scan_state(par_scan_state_), 533 _to_space_closure(to_space_closure_), 534 _old_gen_closure(old_gen_closure_), 535 _to_space_root_closure(to_space_root_closure_), 536 _old_gen_root_closure(old_gen_root_closure_), 537 _par_gen(par_gen_), 538 _task_queues(task_queues_), 539 _terminator(terminator_) 540{} 541 542void ParEvacuateFollowersClosure::do_void() { 543 ObjToScanQueue* work_q = par_scan_state()->work_queue(); 544 545 while (true) { 546 // Scan to-space and old-gen objs until we run out of both. 547 oop obj_to_scan; 548 par_scan_state()->trim_queues(0); 549 550 // We have no local work, attempt to steal from other threads. 551 552 // Attempt to steal work from promoted. 553 if (task_queues()->steal(par_scan_state()->thread_num(), 554 par_scan_state()->hash_seed(), 555 obj_to_scan)) { 556 bool res = work_q->push(obj_to_scan); 557 assert(res, "Empty queue should have room for a push."); 558 559 // If successful, goto Start. 560 continue; 561 562 // Try global overflow list. 563 } else if (par_gen()->take_from_overflow_list(par_scan_state())) { 564 continue; 565 } 566 567 // Otherwise, offer termination. 568 par_scan_state()->start_term_time(); 569 if (terminator()->offer_termination()) break; 570 par_scan_state()->end_term_time(); 571 } 572 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0, 573 "Broken overflow list?"); 574 // Finish the last termination pause. 
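  // (start_term_time() was called just before the successful
  // offer_termination() that broke out of the loop above, so close the
  // interval here.)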
575 par_scan_state()->end_term_time(); 576} 577 578ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, 579 Generation* old_gen, 580 HeapWord* young_old_boundary, 581 ParScanThreadStateSet* state_set, 582 StrongRootsScope* strong_roots_scope) : 583 AbstractGangTask("ParNewGeneration collection"), 584 _young_gen(young_gen), _old_gen(old_gen), 585 _young_old_boundary(young_old_boundary), 586 _state_set(state_set), 587 _strong_roots_scope(strong_roots_scope) 588{} 589 590void ParNewGenTask::work(uint worker_id) { 591 GenCollectedHeap* gch = GenCollectedHeap::heap(); 592 // Since this is being done in a separate thread, need new resource 593 // and handle marks. 594 ResourceMark rm; 595 HandleMark hm; 596 597 ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); 598 assert(_state_set->is_valid(worker_id), "Should not have been called"); 599 600 par_scan_state.set_young_old_boundary(_young_old_boundary); 601 602 KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(), 603 gch->rem_set()->klass_rem_set()); 604 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure, 605 &par_scan_state.to_space_root_closure(), 606 false); 607 608 par_scan_state.start_strong_roots(); 609 gch->gen_process_roots(_strong_roots_scope, 610 GenCollectedHeap::YoungGen, 611 true, // Process younger gens, if any, as strong roots. 612 GenCollectedHeap::SO_ScavengeCodeCache, 613 GenCollectedHeap::StrongAndWeakRoots, 614 &par_scan_state.to_space_root_closure(), 615 &par_scan_state.older_gen_closure(), 616 &cld_scan_closure); 617 618 par_scan_state.end_strong_roots(); 619 620 // "evacuate followers". 621 par_scan_state.evacuate_followers_closure().do_void(); 622} 623 624ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size) 625 : DefNewGeneration(rs, initial_byte_size, "PCopy"), 626 _overflow_list(NULL), 627 _is_alive_closure(this), 628 _plab_stats("Young", YoungPLABSize, PLABWeight) 629{ 630 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;) 631 NOT_PRODUCT(_num_par_pushes = 0;) 632 _task_queues = new ObjToScanQueueSet(ParallelGCThreads); 633 guarantee(_task_queues != NULL, "task_queues allocation failure."); 634 635 for (uint i = 0; i < ParallelGCThreads; i++) { 636 ObjToScanQueue *q = new ObjToScanQueue(); 637 guarantee(q != NULL, "work_queue Allocation failure."); 638 _task_queues->register_queue(i, q); 639 } 640 641 for (uint i = 0; i < ParallelGCThreads; i++) { 642 _task_queues->queue(i)->initialize(); 643 } 644 645 _overflow_stacks = NULL; 646 if (ParGCUseLocalOverflow) { 647 // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ',' 648 typedef Stack<oop, mtGC> GCOopStack; 649 650 _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC); 651 for (size_t i = 0; i < ParallelGCThreads; ++i) { 652 new (_overflow_stacks + i) Stack<oop, mtGC>(); 653 } 654 } 655 656 if (UsePerfData) { 657 EXCEPTION_MARK; 658 ResourceMark rm; 659 660 const char* cname = 661 PerfDataManager::counter_name(_gen_counters->name_space(), "threads"); 662 PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, 663 ParallelGCThreads, CHECK); 664 } 665} 666 667// ParNewGeneration:: 668ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) : 669 DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {} 670 671template <class T> 672void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { 673#ifdef ASSERT 674 { 675 assert(!oopDesc::is_null(*p), "expected non-null ref"); 676 oop obj = 
oopDesc::load_decode_heap_oop_not_null(p); 677 // We never expect to see a null reference being processed 678 // as a weak reference. 679 assert(obj->is_oop(), "expected an oop while scanning weak refs"); 680 } 681#endif // ASSERT 682 683 _par_cl->do_oop_nv(p); 684 685 if (GenCollectedHeap::heap()->is_in_reserved(p)) { 686 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 687 _rs->write_ref_field_gc_par(p, obj); 688 } 689} 690 691void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); } 692void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); } 693 694// ParNewGeneration:: 695KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) : 696 DefNewGeneration::KeepAliveClosure(cl) {} 697 698template <class T> 699void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { 700#ifdef ASSERT 701 { 702 assert(!oopDesc::is_null(*p), "expected non-null ref"); 703 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 704 // We never expect to see a null reference being processed 705 // as a weak reference. 706 assert(obj->is_oop(), "expected an oop while scanning weak refs"); 707 } 708#endif // ASSERT 709 710 _cl->do_oop_nv(p); 711 712 if (GenCollectedHeap::heap()->is_in_reserved(p)) { 713 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 714 _rs->write_ref_field_gc_par(p, obj); 715 } 716} 717 718void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); } 719void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); } 720 721template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) { 722 T heap_oop = oopDesc::load_heap_oop(p); 723 if (!oopDesc::is_null(heap_oop)) { 724 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 725 if ((HeapWord*)obj < _boundary) { 726 assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); 727 oop new_obj = obj->is_forwarded() 728 ? obj->forwardee() 729 : _g->DefNewGeneration::copy_to_survivor_space(obj); 730 oopDesc::encode_store_heap_oop_not_null(p, new_obj); 731 } 732 if (_gc_barrier) { 733 // If p points to a younger generation, mark the card. 
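      // (i.e. record this old-to-young reference in the remembered set so
      // that the next young collection can find it without scanning the
      // entire old generation.)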
734 if ((HeapWord*)obj < _gen_boundary) { 735 _rs->write_ref_field_gc_par(p, obj); 736 } 737 } 738 } 739} 740 741void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); } 742void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); } 743 744class ParNewRefProcTaskProxy: public AbstractGangTask { 745 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 746public: 747 ParNewRefProcTaskProxy(ProcessTask& task, 748 ParNewGeneration& young_gen, 749 Generation& old_gen, 750 HeapWord* young_old_boundary, 751 ParScanThreadStateSet& state_set); 752 753private: 754 virtual void work(uint worker_id); 755private: 756 ParNewGeneration& _young_gen; 757 ProcessTask& _task; 758 Generation& _old_gen; 759 HeapWord* _young_old_boundary; 760 ParScanThreadStateSet& _state_set; 761}; 762 763ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, 764 ParNewGeneration& young_gen, 765 Generation& old_gen, 766 HeapWord* young_old_boundary, 767 ParScanThreadStateSet& state_set) 768 : AbstractGangTask("ParNewGeneration parallel reference processing"), 769 _young_gen(young_gen), 770 _task(task), 771 _old_gen(old_gen), 772 _young_old_boundary(young_old_boundary), 773 _state_set(state_set) 774{ } 775 776void ParNewRefProcTaskProxy::work(uint worker_id) { 777 ResourceMark rm; 778 HandleMark hm; 779 ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id); 780 par_scan_state.set_young_old_boundary(_young_old_boundary); 781 _task.work(worker_id, par_scan_state.is_alive_closure(), 782 par_scan_state.keep_alive_closure(), 783 par_scan_state.evacuate_followers_closure()); 784} 785 786class ParNewRefEnqueueTaskProxy: public AbstractGangTask { 787 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 788 EnqueueTask& _task; 789 790public: 791 ParNewRefEnqueueTaskProxy(EnqueueTask& task) 792 : AbstractGangTask("ParNewGeneration parallel reference enqueue"), 793 _task(task) 794 { } 795 796 virtual void work(uint worker_id) { 797 _task.work(worker_id); 798 } 799}; 800 801void ParNewRefProcTaskExecutor::execute(ProcessTask& task) { 802 GenCollectedHeap* gch = GenCollectedHeap::heap(); 803 WorkGang* workers = gch->workers(); 804 assert(workers != NULL, "Need parallel worker threads."); 805 _state_set.reset(workers->active_workers(), _young_gen.promotion_failed()); 806 ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen, 807 _young_gen.reserved().end(), _state_set); 808 workers->run_task(&rp_task); 809 _state_set.reset(0 /* bad value in debug if not reset */, 810 _young_gen.promotion_failed()); 811} 812 813void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) { 814 GenCollectedHeap* gch = GenCollectedHeap::heap(); 815 WorkGang* workers = gch->workers(); 816 assert(workers != NULL, "Need parallel worker threads."); 817 ParNewRefEnqueueTaskProxy enq_task(task); 818 workers->run_task(&enq_task); 819} 820 821void ParNewRefProcTaskExecutor::set_single_threaded_mode() { 822 _state_set.flush(); 823 GenCollectedHeap* gch = GenCollectedHeap::heap(); 824 gch->save_marks(); 825} 826 827ScanClosureWithParBarrier:: 828ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) : 829 ScanClosure(g, gc_barrier) 830{ } 831 832EvacuateFollowersClosureGeneral:: 833EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, 834 OopsInGenClosure* cur, 835 OopsInGenClosure* older) : 836 _gch(gch), 837 _scan_cur_or_nonheap(cur), _scan_older(older) 838{ } 839 840void EvacuateFollowersClosureGeneral::do_void() { 841 do { 842 // 
Beware: this call will lead to closure applications via virtual 843 // calls. 844 _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, 845 _scan_cur_or_nonheap, 846 _scan_older); 847 } while (!_gch->no_allocs_since_save_marks()); 848} 849 850// A Generation that does parallel young-gen collection. 851 852void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) { 853 assert(_promo_failure_scan_stack.is_empty(), "post condition"); 854 _promo_failure_scan_stack.clear(true); // Clear cached segments. 855 856 remove_forwarding_pointers(); 857 log_info(gc, promotion)("Promotion failed"); 858 // All the spaces are in play for mark-sweep. 859 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. 860 from()->set_next_compaction_space(to()); 861 gch->set_incremental_collection_failed(); 862 // Inform the next generation that a promotion failure occurred. 863 _old_gen->promotion_failure_occurred(); 864 865 // Trace promotion failure in the parallel GC threads 866 thread_state_set.trace_promotion_failed(gc_tracer()); 867 // Single threaded code may have reported promotion failure to the global state 868 if (_promotion_failed_info.has_failed()) { 869 _gc_tracer.report_promotion_failed(_promotion_failed_info); 870 } 871 // Reset the PromotionFailureALot counters. 872 NOT_PRODUCT(gch->reset_promotion_should_fail();) 873} 874 875void ParNewGeneration::collect(bool full, 876 bool clear_all_soft_refs, 877 size_t size, 878 bool is_tlab) { 879 assert(full || size > 0, "otherwise we don't want to collect"); 880 881 GenCollectedHeap* gch = GenCollectedHeap::heap(); 882 883 _gc_timer->register_gc_start(); 884 885 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); 886 WorkGang* workers = gch->workers(); 887 assert(workers != NULL, "Need workgang for parallel work"); 888 uint active_workers = 889 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(), 890 workers->active_workers(), 891 Threads::number_of_non_daemon_threads()); 892 workers->set_active_workers(active_workers); 893 _old_gen = gch->old_gen(); 894 895 // If the next generation is too full to accommodate worst-case promotion 896 // from this generation, pass on collection; let the next generation 897 // do it. 898 if (!collection_attempt_is_safe()) { 899 gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one 900 return; 901 } 902 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 903 904 _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 905 gch->trace_heap_before_gc(gc_tracer()); 906 907 init_assuming_no_promotion_failure(); 908 909 if (UseAdaptiveSizePolicy) { 910 set_survivor_overflow(false); 911 size_policy->minor_collection_begin(); 912 } 913 914 GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause()); 915 916 age_table()->clear(); 917 to()->clear(SpaceDecorator::Mangle); 918 919 gch->save_marks(); 920 921 // Set the correct parallelism (number of queues) in the reference processor 922 ref_processor()->set_active_mt_degree(active_workers); 923 924 // Need to initialize the preserved marks before the ThreadStateSet c'tor. 925 _preserved_marks_set.init(active_workers); 926 927 // Always set the terminator for the active number of workers 928 // because only those workers go through the termination protocol. 
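  // (A ParallelTaskTerminator lets a worker that has exhausted its own queue
  // and failed to steal wait until all active workers agree that no work
  // remains, at which point offer_termination() returns true for everyone.)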
929 ParallelTaskTerminator _term(active_workers, task_queues()); 930 ParScanThreadStateSet thread_state_set(active_workers, 931 *to(), *this, *_old_gen, *task_queues(), 932 _overflow_stacks, _preserved_marks_set, 933 desired_plab_sz(), _term); 934 935 thread_state_set.reset(active_workers, promotion_failed()); 936 937 { 938 StrongRootsScope srs(active_workers); 939 940 ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs); 941 gch->rem_set()->prepare_for_younger_refs_iterate(true); 942 // It turns out that even when we're using 1 thread, doing the work in a 943 // separate thread causes wide variance in run times. We can't help this 944 // in the multi-threaded case, but we special-case n=1 here to get 945 // repeatable measurements of the 1-thread overhead of the parallel code. 946 if (active_workers > 1) { 947 workers->run_task(&tsk); 948 } else { 949 tsk.work(0); 950 } 951 } 952 953 thread_state_set.reset(0 /* Bad value in debug if not reset */, 954 promotion_failed()); 955 956 // Trace and reset failed promotion info. 957 if (promotion_failed()) { 958 thread_state_set.trace_promotion_failed(gc_tracer()); 959 } 960 961 // Process (weak) reference objects found during scavenge. 962 ReferenceProcessor* rp = ref_processor(); 963 IsAliveClosure is_alive(this); 964 ScanWeakRefClosure scan_weak_ref(this); 965 KeepAliveClosure keep_alive(&scan_weak_ref); 966 ScanClosure scan_without_gc_barrier(this, false); 967 ScanClosureWithParBarrier scan_with_gc_barrier(this, true); 968 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); 969 EvacuateFollowersClosureGeneral evacuate_followers(gch, 970 &scan_without_gc_barrier, &scan_with_gc_barrier); 971 rp->setup_policy(clear_all_soft_refs); 972 // Can the mt_degree be set later (at run_task() time would be best)? 973 rp->set_active_mt_degree(active_workers); 974 ReferenceProcessorStats stats; 975 if (rp->processing_is_mt()) { 976 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set); 977 stats = rp->process_discovered_references(&is_alive, &keep_alive, 978 &evacuate_followers, &task_executor, 979 _gc_timer); 980 } else { 981 thread_state_set.flush(); 982 gch->save_marks(); 983 stats = rp->process_discovered_references(&is_alive, &keep_alive, 984 &evacuate_followers, NULL, 985 _gc_timer); 986 } 987 _gc_tracer.report_gc_reference_stats(stats); 988 _gc_tracer.report_tenuring_threshold(tenuring_threshold()); 989 990 if (!promotion_failed()) { 991 // Swap the survivor spaces. 992 eden()->clear(SpaceDecorator::Mangle); 993 from()->clear(SpaceDecorator::Mangle); 994 if (ZapUnusedHeapArea) { 995 // This is now done here because of the piece-meal mangling which 996 // can check for valid mangling at intermediate points in the 997 // collection(s). When a young collection fails to collect 998 // sufficient space resizing of the young generation can occur 999 // and redistribute the spaces in the young generation. Mangle 1000 // here so that unzapped regions don't get distributed to 1001 // other spaces. 1002 to()->mangle_unused_area(); 1003 } 1004 swap_spaces(); 1005 1006 // A successful scavenge should restart the GC time limit count which is 1007 // for full GC's. 
1008 size_policy->reset_gc_overhead_limit_count(); 1009 1010 assert(to()->is_empty(), "to space should be empty now"); 1011 1012 adjust_desired_tenuring_threshold(); 1013 } else { 1014 handle_promotion_failed(gch, thread_state_set); 1015 } 1016 _preserved_marks_set.reclaim(); 1017 // set new iteration safe limit for the survivor spaces 1018 from()->set_concurrent_iteration_safe_limit(from()->top()); 1019 to()->set_concurrent_iteration_safe_limit(to()->top()); 1020 1021 plab_stats()->adjust_desired_plab_sz(); 1022 1023 TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats()); 1024 TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats()); 1025 1026 if (UseAdaptiveSizePolicy) { 1027 size_policy->minor_collection_end(gch->gc_cause()); 1028 size_policy->avg_survived()->sample(from()->used()); 1029 } 1030 1031 // We need to use a monotonically non-decreasing time in ms 1032 // or we will see time-warp warnings and os::javaTimeMillis() 1033 // does not guarantee monotonicity. 1034 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 1035 update_time_of_last_gc(now); 1036 1037 rp->set_enqueuing_is_done(true); 1038 if (rp->processing_is_mt()) { 1039 ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set); 1040 rp->enqueue_discovered_references(&task_executor); 1041 } else { 1042 rp->enqueue_discovered_references(NULL); 1043 } 1044 rp->verify_no_references_recorded(); 1045 1046 gch->trace_heap_after_gc(gc_tracer()); 1047 1048 _gc_timer->register_gc_end(); 1049 1050 _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); 1051} 1052 1053size_t ParNewGeneration::desired_plab_sz() { 1054 return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers()); 1055} 1056 1057static int sum; 1058void ParNewGeneration::waste_some_time() { 1059 for (int i = 0; i < 100; i++) { 1060 sum += i; 1061 } 1062} 1063 1064static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4); 1065 1066// Because of concurrency, there are times where an object for which 1067// "is_forwarded()" is true contains an "interim" forwarding pointer 1068// value. Such a value will soon be overwritten with a real value. 1069// This method requires "obj" to have a forwarding pointer, and waits, if 1070// necessary for a real one to be inserted, and returns it. 1071 1072oop ParNewGeneration::real_forwardee(oop obj) { 1073 oop forward_ptr = obj->forwardee(); 1074 if (forward_ptr != ClaimedForwardPtr) { 1075 return forward_ptr; 1076 } else { 1077 return real_forwardee_slow(obj); 1078 } 1079} 1080 1081oop ParNewGeneration::real_forwardee_slow(oop obj) { 1082 // Spin-read if it is claimed but not yet written by another thread. 1083 oop forward_ptr = obj->forwardee(); 1084 while (forward_ptr == ClaimedForwardPtr) { 1085 waste_some_time(); 1086 assert(obj->is_forwarded(), "precondition"); 1087 forward_ptr = obj->forwardee(); 1088 } 1089 return forward_ptr; 1090} 1091 1092// Multiple GC threads may try to promote an object. If the object 1093// is successfully promoted, a forwarding pointer will be installed in 1094// the object in the young generation. This method claims the right 1095// to install the forwarding pointer before it copies the object, 1096// thus avoiding the need to undo the copy as in 1097// copy_to_survivor_space_avoiding_with_undo. 
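// In outline (a summary of the code below, not a separate code path):
//   1. If the object is young enough, try to allocate and copy it into
//      to-space, then publish the copy with forward_to_atomic(new_obj); a
//      thread that loses that CAS undoes its allocation and returns the
//      winner's forwardee.
//   2. Otherwise CAS the sentinel ClaimedForwardPtr into the mark word.  The
//      winner promotes the object (or, on promotion failure, forwards it to
//      itself); any thread that observes the sentinel spins in
//      real_forwardee_slow() until the real forwarding pointer appears.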
1098 1099oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state, 1100 oop old, 1101 size_t sz, 1102 markOop m) { 1103 // In the sequential version, this assert also says that the object is 1104 // not forwarded. That might not be the case here. It is the case that 1105 // the caller observed it to be not forwarded at some time in the past. 1106 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); 1107 1108 // The sequential code read "old->age()" below. That doesn't work here, 1109 // since the age is in the mark word, and that might be overwritten with 1110 // a forwarding pointer by a parallel thread. So we must save the mark 1111 // word in a local and then analyze it. 1112 oopDesc dummyOld; 1113 dummyOld.set_mark(m); 1114 assert(!dummyOld.is_forwarded(), 1115 "should not be called with forwarding pointer mark word."); 1116 1117 oop new_obj = NULL; 1118 oop forward_ptr; 1119 1120 // Try allocating obj in to-space (unless too old) 1121 if (dummyOld.age() < tenuring_threshold()) { 1122 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); 1123 if (new_obj == NULL) { 1124 set_survivor_overflow(true); 1125 } 1126 } 1127 1128 if (new_obj == NULL) { 1129 // Either to-space is full or we decided to promote try allocating obj tenured 1130 1131 // Attempt to install a null forwarding pointer (atomically), 1132 // to claim the right to install the real forwarding pointer. 1133 forward_ptr = old->forward_to_atomic(ClaimedForwardPtr); 1134 if (forward_ptr != NULL) { 1135 // someone else beat us to it. 1136 return real_forwardee(old); 1137 } 1138 1139 if (!_promotion_failed) { 1140 new_obj = _old_gen->par_promote(par_scan_state->thread_num(), 1141 old, m, sz); 1142 } 1143 1144 if (new_obj == NULL) { 1145 // promotion failed, forward to self 1146 _promotion_failed = true; 1147 new_obj = old; 1148 1149 par_scan_state->preserved_marks()->push_if_necessary(old, m); 1150 par_scan_state->register_promotion_failure(sz); 1151 } 1152 1153 old->forward_to(new_obj); 1154 forward_ptr = NULL; 1155 } else { 1156 // Is in to-space; do copying ourselves. 1157 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); 1158 assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value."); 1159 forward_ptr = old->forward_to_atomic(new_obj); 1160 // Restore the mark word copied above. 1161 new_obj->set_mark(m); 1162 // Increment age if obj still in new generation 1163 new_obj->incr_age(); 1164 par_scan_state->age_table()->add(new_obj, sz); 1165 } 1166 assert(new_obj != NULL, "just checking"); 1167 1168 // This code must come after the CAS test, or it will print incorrect 1169 // information. 1170 log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", 1171 is_in_reserved(new_obj) ? "copying" : "tenuring", 1172 new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size()); 1173 1174 if (forward_ptr == NULL) { 1175 oop obj_to_push = new_obj; 1176 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { 1177 // Length field used as index of next element to be scanned. 1178 // Real length can be obtained from real_forwardee() 1179 arrayOop(old)->set_length(0); 1180 obj_to_push = old; 1181 assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, 1182 "push forwarded object"); 1183 } 1184 // Push it on one of the queues of to-be-scanned objects. 
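    // If the push below fails (queue full) or overflow is being simulated,
    // the object is diverted to the shared overflow list or the per-thread
    // overflow stack via push_on_overflow_list().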
1185 bool simulate_overflow = false; 1186 NOT_PRODUCT( 1187 if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { 1188 // simulate a stack overflow 1189 simulate_overflow = true; 1190 } 1191 ) 1192 if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { 1193 // Add stats for overflow pushes. 1194 log_develop_trace(gc)("Queue Overflow"); 1195 push_on_overflow_list(old, par_scan_state); 1196 TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); 1197 } 1198 1199 return new_obj; 1200 } 1201 1202 // Oops. Someone beat us to it. Undo the allocation. Where did we 1203 // allocate it? 1204 if (is_in_reserved(new_obj)) { 1205 // Must be in to_space. 1206 assert(to()->is_in_reserved(new_obj), "Checking"); 1207 if (forward_ptr == ClaimedForwardPtr) { 1208 // Wait to get the real forwarding pointer value. 1209 forward_ptr = real_forwardee(old); 1210 } 1211 par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); 1212 } 1213 1214 return forward_ptr; 1215} 1216 1217#ifndef PRODUCT 1218// It's OK to call this multi-threaded; the worst thing 1219// that can happen is that we'll get a bunch of closely 1220// spaced simulated overflows, but that's OK, in fact 1221// probably good as it would exercise the overflow code 1222// under contention. 1223bool ParNewGeneration::should_simulate_overflow() { 1224 if (_overflow_counter-- <= 0) { // just being defensive 1225 _overflow_counter = ParGCWorkQueueOverflowInterval; 1226 return true; 1227 } else { 1228 return false; 1229 } 1230} 1231#endif 1232 1233// In case we are using compressed oops, we need to be careful. 1234// If the object being pushed is an object array, then its length 1235// field keeps track of the "grey boundary" at which the next 1236// incremental scan will be done (see ParGCArrayScanChunk). 1237// When using compressed oops, this length field is kept in the 1238// lower 32 bits of the erstwhile klass word and cannot be used 1239// for the overflow chaining pointer (OCP below). As such the OCP 1240// would itself need to be compressed into the top 32-bits in this 1241// case. Unfortunately, see below, in the event that we have a 1242// promotion failure, the node to be pushed on the list can be 1243// outside of the Java heap, so the heap-based pointer compression 1244// would not work (we would have potential aliasing between C-heap 1245// and Java-heap pointers). For this reason, when using compressed 1246// oops, we simply use a worker-thread-local, non-shared overflow 1247// list in the form of a growable array, with a slightly different 1248// overflow stack draining strategy. If/when we start using fat 1249// stacks here, we can go back to using (fat) pointer chains 1250// (although some performance comparisons would be useful since 1251// single global lists have their own performance disadvantages 1252// as we were made painfully aware not long ago, see 6786503). 1253#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff)) 1254void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { 1255 assert(is_in_reserved(from_space_obj), "Should be from this generation"); 1256 if (ParGCUseLocalOverflow) { 1257 // In the case of compressed oops, we use a private, not-shared 1258 // overflow stack. 1259 par_scan_state->push_on_overflow_stack(from_space_obj); 1260 } else { 1261 assert(!UseCompressedOops, "Error"); 1262 // if the object has been forwarded to itself, then we cannot 1263 // use the klass pointer for the linked list. 
Instead we have 1264 // to allocate an oopDesc in the C-Heap and use that for the linked list. 1265 // XXX This is horribly inefficient when a promotion failure occurs 1266 // and should be fixed. XXX FIX ME !!! 1267#ifndef PRODUCT 1268 Atomic::inc_ptr(&_num_par_pushes); 1269 assert(_num_par_pushes > 0, "Tautology"); 1270#endif 1271 if (from_space_obj->forwardee() == from_space_obj) { 1272 oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC); 1273 listhead->forward_to(from_space_obj); 1274 from_space_obj = listhead; 1275 } 1276 oop observed_overflow_list = _overflow_list; 1277 oop cur_overflow_list; 1278 do { 1279 cur_overflow_list = observed_overflow_list; 1280 if (cur_overflow_list != BUSY) { 1281 from_space_obj->set_klass_to_list_ptr(cur_overflow_list); 1282 } else { 1283 from_space_obj->set_klass_to_list_ptr(NULL); 1284 } 1285 observed_overflow_list = 1286 (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); 1287 } while (cur_overflow_list != observed_overflow_list); 1288 } 1289} 1290 1291bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { 1292 bool res; 1293 1294 if (ParGCUseLocalOverflow) { 1295 res = par_scan_state->take_from_overflow_stack(); 1296 } else { 1297 assert(!UseCompressedOops, "Error"); 1298 res = take_from_overflow_list_work(par_scan_state); 1299 } 1300 return res; 1301} 1302 1303 1304// *NOTE*: The overflow list manipulation code here and 1305// in CMSCollector:: are very similar in shape, 1306// except that in the CMS case we thread the objects 1307// directly into the list via their mark word, and do 1308// not need to deal with special cases below related 1309// to chunking of object arrays and promotion failure 1310// handling. 1311// CR 6797058 has been filed to attempt consolidation of 1312// the common code. 1313// Because of the common code, if you make any changes in 1314// the code below, please check the CMS version to see if 1315// similar changes might be needed. 1316// See CMSCollector::par_take_from_overflow_list() for 1317// more extensive documentation comments. 1318bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { 1319 ObjToScanQueue* work_q = par_scan_state->work_queue(); 1320 // How many to take? 1321 size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 1322 (size_t)ParGCDesiredObjsFromOverflowList); 1323 1324 assert(!UseCompressedOops, "Error"); 1325 assert(par_scan_state->overflow_stack() == NULL, "Error"); 1326 if (_overflow_list == NULL) return false; 1327 1328 // Otherwise, there was something there; try claiming the list. 1329 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); 1330 // Trim off a prefix of at most objsFromOverflow items 1331 Thread* tid = Thread::current(); 1332 size_t spin_count = ParallelGCThreads; 1333 size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100); 1334 for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) { 1335 // someone grabbed it before we did ... 1336 // ... we spin for a short while... 
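      // (sleep_time_millis above scales the back-off to roughly 1 ms per 100
      // objects we hope to take, with a 1 ms minimum.)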
1337 os::sleep(tid, sleep_time_millis, false); 1338 if (_overflow_list == NULL) { 1339 // nothing left to take 1340 return false; 1341 } else if (_overflow_list != BUSY) { 1342 // try and grab the prefix 1343 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); 1344 } 1345 } 1346 if (prefix == NULL || prefix == BUSY) { 1347 // Nothing to take or waited long enough 1348 if (prefix == NULL) { 1349 // Write back the NULL in case we overwrote it with BUSY above 1350 // and it is still the same value. 1351 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); 1352 } 1353 return false; 1354 } 1355 assert(prefix != NULL && prefix != BUSY, "Error"); 1356 size_t i = 1; 1357 oop cur = prefix; 1358 while (i < objsFromOverflow && cur->klass_or_null() != NULL) { 1359 i++; cur = cur->list_ptr_from_klass(); 1360 } 1361 1362 // Reattach remaining (suffix) to overflow list 1363 if (cur->klass_or_null() == NULL) { 1364 // Write back the NULL in lieu of the BUSY we wrote 1365 // above and it is still the same value. 1366 if (_overflow_list == BUSY) { 1367 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); 1368 } 1369 } else { 1370 assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error"); 1371 oop suffix = cur->list_ptr_from_klass(); // suffix will be put back on global list 1372 cur->set_klass_to_list_ptr(NULL); // break off suffix 1373 // It's possible that the list is still in the empty(busy) state 1374 // we left it in a short while ago; in that case we may be 1375 // able to place back the suffix. 1376 oop observed_overflow_list = _overflow_list; 1377 oop cur_overflow_list = observed_overflow_list; 1378 bool attached = false; 1379 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { 1380 observed_overflow_list = 1381 (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); 1382 if (cur_overflow_list == observed_overflow_list) { 1383 attached = true; 1384 break; 1385 } else cur_overflow_list = observed_overflow_list; 1386 } 1387 if (!attached) { 1388 // Too bad, someone else got in in between; we'll need to do a splice. 1389 // Find the last item of suffix list 1390 oop last = suffix; 1391 while (last->klass_or_null() != NULL) { 1392 last = last->list_ptr_from_klass(); 1393 } 1394 // Atomically prepend suffix to current overflow list 1395 observed_overflow_list = _overflow_list; 1396 do { 1397 cur_overflow_list = observed_overflow_list; 1398 if (cur_overflow_list != BUSY) { 1399 // Do the splice ... 1400 last->set_klass_to_list_ptr(cur_overflow_list); 1401 } else { // cur_overflow_list == BUSY 1402 last->set_klass_to_list_ptr(NULL); 1403 } 1404 observed_overflow_list = 1405 (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); 1406 } while (cur_overflow_list != observed_overflow_list); 1407 } 1408 } 1409 1410 // Push objects on prefix list onto this thread's work queue 1411 assert(prefix != NULL && prefix != BUSY, "program logic"); 1412 cur = prefix; 1413 ssize_t n = 0; 1414 while (cur != NULL) { 1415 oop obj_to_push = cur->forwardee(); 1416 oop next = cur->list_ptr_from_klass(); 1417 cur->set_klass(obj_to_push->klass()); 1418 // This may be an array object that is self-forwarded. In that case, the list pointer 1419 // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. 1420 if (!is_in_reserved(cur)) { 1421 // This can become a scaling bottleneck when there is work queue overflow coincident 1422 // with promotion failure. 
1423 oopDesc* f = cur; 1424 FREE_C_HEAP_ARRAY(oopDesc, f); 1425 } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { 1426 assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); 1427 obj_to_push = cur; 1428 } 1429 bool ok = work_q->push(obj_to_push); 1430 assert(ok, "Should have succeeded"); 1431 cur = next; 1432 n++; 1433 } 1434 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n)); 1435#ifndef PRODUCT 1436 assert(_num_par_pushes >= n, "Too many pops?"); 1437 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); 1438#endif 1439 return true; 1440} 1441#undef BUSY 1442 1443void ParNewGeneration::ref_processor_init() { 1444 if (_ref_processor == NULL) { 1445 // Allocate and initialize a reference processor 1446 _ref_processor = 1447 new ReferenceProcessor(_reserved, // span 1448 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 1449 ParallelGCThreads, // mt processing degree 1450 refs_discovery_is_mt(), // mt discovery 1451 ParallelGCThreads, // mt discovery degree 1452 refs_discovery_is_atomic(), // atomic_discovery 1453 NULL); // is_alive_non_header 1454 } 1455} 1456 1457const char* ParNewGeneration::name() const { 1458 return "par new generation"; 1459} 1460