// heapRegion.inline.hpp, revision 8413:92457dfb91bd
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

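// Inline implementations of the allocation and marking-bookkeeping
// methods of G1OffsetTableContigSpace and HeapRegion.
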
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // If the CAS succeeded, result is the old top value (obj) and the
      // allocation is ours; otherwise result is the top value installed
      // by a competing thread, and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, this is
// best used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

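// Both lookups delegate to the block offset table, which maps an
// arbitrary address to the start of the block containing it.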
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

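// A block starting at p is an object unless class unloading with
// concurrent mark is enabled, in which case a dead object's class may
// already have been unloaded and liveness must be checked explicitly.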
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

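// Size, in HeapWords, of the block starting at addr: either the
// unallocated tail [top(), end()), a single object, or (with class
// unloading) a run of dead objects extending to the next live object.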
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
              "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
              "addr: " PTR_FORMAT,
              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes, so we need to find
  // the next live object by some means other than asking the dead oop
  // for its size.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

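// Allocators that skip block offset table (BOT) updates. Only young
// regions may use these: young regions are always evacuated in full, so
// their BOT is never consulted.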
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

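// Marking bookkeeping: when a cycle starts, NTAMS (next top-at-mark-start)
// is set to the current top, so objects allocated above it during the
// cycle are implicitly treated as live. When the cycle ends, NTAMS and
// the next-marked-byte count roll over into their "prev" counterparts.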
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

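// Evacuation bookkeeping: these calls bracket the use of this region as
// an allocation (to-space) region during a pause and keep NTAMS
// consistent with any marks placed; see the comments in the bodies below.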
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Explicit marks only make
      // sense below NTAMS, and ideally we would check that condition
      // here. But since we don't know where the top of this region
      // will end up, we simply set NTAMS to the end of the region so
      // all marks will be below NTAMS. We'll set it to the actual top
      // when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP