/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

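// An object is scanned in chunks only if it is a large objArray that was
// actually copied; new_obj != old_obj rules out objects that were
// self-forwarded because of a promotion failure.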
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
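  // The from-space copy's length field serves as the scan cursor: it holds
  // the index of the next element to scan, while the to-space copy keeps
  // the real length (see the chunking setup in copy_to_survivor_space()).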
  int start     = arrayOop(old)->length();
  int end       = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
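  // Take at most a quarter of the queue's free slots, further capped by
  // ParGCDesiredObjsFromOverflowList and by what the stack actually holds.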
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp  = to_space();
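    // "Small" here means less than ParallelGCBufferWastePct percent of a
    // full PLAB, so retiring the current buffer to make room for the object
    // wastes at most that fraction of a buffer.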
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          PLAB::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       young_gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _young_gen(young_gen),
    _old_gen(old_gen),
    _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
    _old_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads.

    // First, try to steal from other threads' work queues.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, start over from the top of the loop.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         GenCollectedHeap::YoungGen,
                         true,  // Process younger gens, if any, as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
private:
  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id) {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single-threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (active_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  // Set new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms, or we will
  // see time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
}

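// A tiny busy-wait loop, used by real_forwardee_slow() below to give another
// thread time to finish installing a real forwarding pointer; the static
// 'sum' gives the loop an observable side effect.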
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

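// An interim forwarding-pointer sentinel; 0x4 is a low, non-heap address and
// therefore distinguishable from any genuine forwardee.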
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote; try allocating the obj tenured.

    // Attempt to install the ClaimedForwardPtr sentinel (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // Promotion failed; forward to self.
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
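    // Try to install the real forwarding pointer; if another thread won the
    // race, forward_ptr records its value and our copy is undone below.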
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
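// BUSY marks the global overflow list as temporarily claimed by some thread;
// it is distinguishable from both NULL and any real oop.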
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
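    // Lock-free push, essentially a Treiber-stack push: link the new head to
    // the observed head (or to NULL if the list is marked BUSY), then try to
    // CAS it in, retrying until the CAS succeeds.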
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
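  // Walk at most objsFromOverflow links down the claimed prefix; a NULL
  // klass word marks the last element of the list.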
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass(); // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);        // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}