parNewGeneration.cpp revision 9058:983c56341c80
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue.
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk.
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
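
// Illustrative example (not part of the original sources) of the chunking
// above, assuming the default ParGCArrayScanChunk of 50: if the forwardee's
// real length is 1000 and the old copy currently records 300, the remainder
// (700) exceeds 2 * 50, so only elements [300, 350) are scanned now, the old
// copy's length is bumped to 350, and the array is re-pushed for later.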

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
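
// Illustrative sketch (not part of the original sources): with
// ParallelGCBufferWastePct == 10 and a PLAB of 4096 words, the
// retire-and-refill path below is taken only for requests smaller than
// about 410 words (word_sz * 100 < 10 * 4096); larger objects are
// allocated directly in to-space so that a mostly-full buffer is not
// thrown away for the sake of one big object.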
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes = PLAB::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& young_gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
 public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _young_gen(young_gen),
    _old_gen(old_gen),
    _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms       ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
    _old_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
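
// Illustrative sketch (not part of the original sources) of the loop in
// do_void() below:
//
//   loop:
//     drain local queue (trim_queues(0));
//     if (steal from another thread's queue succeeds)  goto loop;
//     if (refill from the overflow list succeeds)      goto loop;
//     if (terminator->offer_termination())             done;  // all idle
//     else /* new work appeared while waiting */       goto loop;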

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // Attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, goto Start.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         GenCollectedHeap::YoungGen,
                         true,  // Process younger gens, if any, as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
    DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
    DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
private:
  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id) {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }
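
// Illustrative note (not part of the original sources): do_void() below is a
// fixed-point iteration. Each pass over the objects allocated since the last
// save_marks() may copy further objects (allocating again in to-space or the
// old gen), so the loop repeats until no_allocs_since_save_marks() reports
// that a full pass completed without any new allocations.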

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times. We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (active_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  // Set new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.
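
// Illustrative sketch (not part of the original sources) of the claiming
// protocol used below when promoting to the old generation:
//
//   if (old->forward_to_atomic(ClaimedForwardPtr) != NULL) {
//     return real_forwardee(old);  // another thread won the race
//   }
//   new_obj = ... promote/copy ...;
//   old->forward_to(new_obj);      // publish the real forwardee
//
// Copies into to-space instead race on forward_to_atomic(new_obj), and the
// loser undoes its own copy (see the tail of copy_to_survivor_space).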

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old).
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote; try allocating obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation.
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           is_in_reserved(new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee().
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
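
// Illustrative sketch (not part of the original sources) of the global
// overflow list used below when ParGCUseLocalOverflow is off: it is a
// Treiber-style lock-free stack threaded through the objects' klass words,
// with BUSY standing in for "a thread is currently trimming a prefix":
//
//   _overflow_list -> objA.klass -> objB.klass -> NULL
//
// Self-forwarded (promotion-failure) objects cannot donate their klass word,
// so a C-heap oopDesc is allocated as a list node that forwards to them.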
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
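  // (Illustrative note, not part of the original sources: the xchg below
  //  both claims the whole list and marks the head BUSY, so concurrent
  //  pushers treat the list as empty and start a fresh chain rather than
  //  linking onto the prefix we are about to trim.)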
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}