parNewGeneration.cpp revision 11945:6d3c44100184
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

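// Per-thread state for the parallel young-generation scavenge. Each GC
// worker owns a work queue of objects to scan, a to-space PLAB, a local
// age table and, when ParGCUseLocalOverflow is set, a private overflow
// stack for objects that do not fit on the queue.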
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       PreservedMarks* preserved_marks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _preserved_marks(preserved_marks_),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

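// Object arrays longer than ParGCArrayScanChunk are not scanned all at
// once: a copied array is scanned in chunks, with the (old copy's)
// length field temporarily tracking how far scanning has progressed.
// The predicate below selects only arrays that have actually been copied
// (new_obj != old_obj); a self-forwarded array left behind by a
// promotion failure must not be chunked.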
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start     = arrayOop(old)->length();
  int end       = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

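// Scan objects popped off the work queue until its size drops to
// max_size; with a private overflow stack in use, refill the queue from
// that stack and repeat until both are drained to that level.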
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

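// Move a batch of objects from the private overflow stack onto the work
// queue, making them visible to (and stealable by) other workers. The
// batch size is throttled so a single refill never consumes more than a
// quarter of the queue's remaining capacity.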
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

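// Slow-path to-space allocation: the current PLAB could not satisfy the
// request. For a small request, retire the buffer and start a new one,
// shrinking the new buffer to whatever contiguous space remains as
// to-space fills up; for a large request, allocate the object directly
// in to-space.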
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp  = to_space();
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      // The minimum size has to be twice SurvivorAlignmentInBytes to
      // allow for padding used in the alignment of 1 word.  A padding
      // of 1 is too small for a filler word so the padding size will
      // be increased by SurvivorAlignmentInBytes.
      size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
      size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate requested object size "
               SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
               SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
               word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: StackObj {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       young_gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        PreservedMarksSet&      preserved_marks_set,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void print_termination_stats_hdr(outputStream* const st);
  void print_termination_stats();
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats();
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
  ParScanThreadState*     _per_thread_states;
  const int               _num_threads;

public:
  bool is_valid(int id) const { return id < _num_threads; }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             PreservedMarksSet& preserved_marks_set,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : _term(term),
    _young_gen(young_gen),
    _old_gen(old_gen),
    _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
    _num_threads(num_threads)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new(_per_thread_states + i)
      ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                         overflow_stacks, preserved_marks_set.get(i),
                         desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < _num_threads, "sanity check!");
  return _per_thread_states[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < _num_threads; ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < _num_threads; ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < _num_threads; ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats() {
  Log(gc, task, stats) log;
  if (!log.is_debug()) {
    return;
  }

  ResourceMark rm;
  outputStream* st = log.debug_stream();

  print_termination_stats_hdr(st);

  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState& pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats() {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState& pss = thread_state(i);
    const TaskQueueStats& stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as possible
  // since it might otherwise become a bottleneck to scaling.
  // Should we ever add heavy-weight work to this loop, consider
  // distributing it across the worker threads.
  for (int i = 0; i < _num_threads; ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table.  We need to merge
    // them all into one.
    AgeTable* local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

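// Drain the local work queue, then repeatedly try to steal from other
// workers' queues and refill from the global overflow list; when no work
// can be found anywhere, offer termination and exit once all workers
// have agreed to terminate.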
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads.

    // Attempt to steal work from another thread's queue.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, start over from the top of the loop.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         GenCollectedHeap::YoungGen,
                         true,  // Process younger gens, if any, as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();

  // This will collapse this worker's promoted object list that's
  // created during the main parallel phase of ParNew. This has
  // to be called after all workers have finished promoting objects
  // and scanning promoted objects. It should be safe calling it from
  // here, given that we can only reach here after all threads have
  // offered termination, i.e., after there is no more work to be
  // done. It will also disable promotion tracking for the rest of
  // this GC as it's not necessary to be on during reference processing.
  _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}

ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats("Young", YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);

  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id) {
    _task.work(worker_id);
  }
};

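// Run the given reference-processing task across the GC worker gang,
// resetting the per-thread scan states before handing them to the
// workers and again (to a deliberately bad value in debug builds)
// afterwards.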
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

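// Called when reference processing drops back to a single thread:
// flush the per-thread state (PLABs, age tables, promotion bookkeeping)
// and save the marks, mirroring what collect() does on its serial
// reference-processing path.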
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  log_info(gc, promotion)("Promotion failed");
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

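// The main entry point for a parallel young-generation collection:
// scavenge roots and their followers with the worker gang, process
// discovered references, and then either swap the survivor spaces or,
// on promotion failure, restore the heap for a subsequent full
// collection.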
void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  active_workers = workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(active_workers);

  // Need to initialize the preserved marks before the ThreadStateSet c'tor.
  _preserved_marks_set.init(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, _preserved_marks_set,
                                         desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    // Might multiple workers ever be used?  If yes, initialization
    // has been done such that the single threaded path should not be used.
    if (workers->total_workers() > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  _preserved_marks_set.reclaim();
  // Set new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  plab_stats()->adjust_desired_plab_sz();

  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // because os::javaTimeMillis() does not guarantee monotonicity,
  // and we would otherwise see time-warp warnings.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
}

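// Bounded busy-wait used while another thread replaces a claimed
// ("interim") forwarding pointer with the real one; the store to the
// file-scope 'sum' presumably keeps the loop from being optimized away.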
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer; it waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote: try allocating
    // the obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      par_scan_state->preserved_marks()->push_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      log_develop_trace(gc)("Queue Overflow");
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
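// BUSY, like ClaimedForwardPtr above, is a sentinel value that can never
// be a valid oop; it marks the global overflow list head as claimed
// while a thread detaches a prefix of the list.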
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // If the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // Someone grabbed it before we did ...
    // ... we spin for a short while ...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // Nothing left to take.
      return false;
    } else if (_overflow_list != BUSY) {
      // Try to grab the prefix.
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough.
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();       // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty (busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}