/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionLAB.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"

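// Return the promotion manager for the given worker thread. Index
// ParallelGCThreads is also valid; that last slot is reserved for the
// VM thread.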
inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

template <class T>
inline void PSPromotionManager::push_depth(T* p) {
  claimed_stack_depth()->push(p);
}

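// If the object referenced from *p has already been forwarded, update the
// reference in place (dirtying the card if the forwardee is still in the
// young gen); otherwise defer the copy by pushing the location onto this
// manager's depth-first work queue.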
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: p should never be NULL here; this check may be redundant
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");
  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
                                                      size_t obj_size,
                                                      uint age, bool tenured,
                                                      const PSPromotionLAB* lab) {
  // Skip if memory allocation failed
  if (new_obj != NULL) {
    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

    if (lab != NULL) {
      // Promotion of object through newly allocated PLAB
      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        size_t lab_size = lab->capacity();
        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
                                                      age, tenured, lab_size);
      }
    } else {
      // Promotion of object directly to heap
      if (gc_tracer->should_report_promotion_outside_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

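// Push the oop fields of obj onto this manager's work queue so that the
// objects they reference are scanned and copied later.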
inline void PSPromotionManager::push_contents(oop obj) {
  obj->ps_push_contents(this);
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
template<bool promote_immediately>
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT-safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion-failed test and its handling. The code
        // belongs here for two reasons: it is slightly different from
        // the code below and cannot share the CAS testing code, and
        // keeping it here minimizes the impact on the common-case
        // fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won the race; we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test should be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        push_contents(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space.  If it was directly allocated we cannot
      // deallocate it, so we have to test.  If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  should_scavenge(&new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());

  return new_obj;
}

// Attempt to "claim" the oop at p via CAS and push the new obj if successful.
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T, bool promote_immediately>
inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = oopDesc::load_decode_heap_oop_not_null(p);
  oop new_obj = o->is_forwarded()
        ? o->forwardee()
        : copy_to_survivor_space<promote_immediately>(o);

  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
    log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                      "forwarding",
                      new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }

  oopDesc::encode_store_heap_oop_not_null(p, new_obj);

  // We cannot mark without this test, as some code passes us pointers
  // that are outside the heap. These pointers are either from roots
  // or from metadata.
  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
    if (PSScavenge::is_obj_in_young(new_obj)) {
      PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
    }
  }
}

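// A masked entry denotes a chunk of a large objArray that is processed
// piecewise; any other entry is an ordinary (possibly narrow) oop location
// that is copied and pushed through the safe barrier.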
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(p);
    } else {
      copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(p);
    }
  }
}

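// Try to steal a task from another worker's depth-first work queue.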
inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) {
  return stack_array_depth()->steal(queue_num, seed, t);
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP