/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"

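// Bump-pointer allocation: try to allocate desired_word_size words starting at
// top(), clamped to the space remaining before end(). Returns NULL if fewer
// than min_word_size words are available. Not MT-safe.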
inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

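// Lock-free variant of allocate_impl(): bumps top() with a CAS and retries if
// another thread updated top concurrently. Returns NULL once fewer than
// min_word_size words remain.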
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                      size_t desired_word_size,
                                                      size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two things:
      //  the old top value: the exchange succeeded and we own [obj, new_top)
      //  otherwise: another thread updated top first, so loop around and retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

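// Single-threaded allocation that also records the newly allocated block in
// the block offset table (BOT) so that block_start() lookups remain correct.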
inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping the block offset table (_bot_part)
// up to date with the allocations, we sequentialize these with a lock.
// Therefore, it is best if this is used for larger LAB allocations only.
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

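// Determines whether obj is dead with respect to the given "previous" marking
// bitmap and, as a side effect, stores the size of the block starting at obj
// in *size. When class unloading is enabled and the object is dead, its klass
// may already be unloaded, so the size must be derived from the bitmap instead
// of from the object header.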
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

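// Returns whether the block starting at p should be treated as an object.
// If p is outside this region (only possible for continues-humongous regions),
// only the start of the enclosing humongous object counts. With class
// unloading enabled, dead objects may have unloaded classes, so liveness is
// checked explicitly; otherwise everything below top() is an object.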
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

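// Returns the size of the block of dead heap starting at addr by measuring the
// distance to the next object marked on the "previous" bitmap, bounded by the
// previous top-at-mark-start. Only needed when class unloading may have
// invalidated the klasses of dead objects.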
inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes
  // We need to find the next live object using the bitmap
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

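// An object is dead if it was allocated before the previous marking cycle
// started (i.e. below the previous top-at-mark-start) and was not marked on
// the previous bitmap. Objects in open archive regions are never dead.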
inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked((HeapWord*)obj) &&
         !is_open_archive();
}

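// Returns the size of the block starting at addr: the remaining space in the
// region if addr is top(), the object's size if block_is_obj(addr), and
// otherwise the distance to the next marked object on the previous bitmap.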
inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
}

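// Allocation paths that skip block offset table (BOT) updates. Only valid for
// young regions, as the asserts below enforce.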
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

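// Called when a concurrent marking cycle starts: reset the count of bytes
// marked during this cycle and record the current top as the next
// top-at-mark-start (NTAMS).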
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

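// Called when a concurrent marking cycle finishes: the data gathered during
// this cycle becomes the "previous" marking information used for liveness
// queries, and the next-marked-bytes counter is reset.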
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom,NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

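// Applies cl to the references of the humongous object that covers mr, if that
// object is live. Returns false only when, outside of a GC pause, the object's
// klass has not yet been published; the caller must then treat the card as not
// having been processed.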
template <class Closure, bool is_gc_active>
bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
                                              Closure* cl,
                                              G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass.  That can only happen if the card is stale.  However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return false;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those.  So only process the one
  // humongous object.
  if (!g1h->is_obj_dead(obj, sr)) {
    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
      // objArrays are always marked precisely, so limit processing
      // with mr.  Non-objArrays might be precisely marked, and since
      // it's humongous it's worthwhile avoiding full processing.
      // However, the card could be stale and only cover filler
      // objects.  That should be rare, so not worth checking for;
      // instead let it fall out from the bounded iteration.
      obj->oop_iterate(cl, mr);
    } else {
      // If obj is not an objArray and mr contains the start of the
      // obj, then this could be an imprecise mark, and we need to
      // process the entire object.
      obj->oop_iterate(cl);
    }
  }
  return true;
}

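// Walks the objects intersecting the card region mr and applies cl to the
// references of each live object. Returns false only if a humongous object on
// the card is found to be not yet parseable (see above); the caller must then
// treat the card as not having been processed.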
template <bool is_gc_active, class Closure>
bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old(), "precondition");

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prevMarkBitMap();
  do {
    oop obj = oop(cur);
    assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}

#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP