/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"

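// Non-atomic bump-pointer allocation. Tries to allocate desired_word_size
// words from the current top; if less space remains, any size down to
// min_word_size is accepted. The number of words actually allocated is
// reported through *actual_size; NULL is returned if even min_word_size
// does not fit.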
inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

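// Lock-free variant of allocate_impl() for use by multiple threads. The top
// pointer is advanced with a CAS; on contention the loop re-reads top and
// retries until the allocation either succeeds or no longer fits.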
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                      size_t desired_word_size,
                                                      size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is the value of top() that the cmpxchg observed:
      //  if it equals obj, the exchange succeeded and we own [obj, new_top);
      //  otherwise another thread advanced top concurrently and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

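// Single-threaded allocation that also records the newly allocated block in
// the block offset table (BOT) so that block_start() can later find it.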
inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because the block offset table (_bot_part) must be kept up to date with
// allocations, we serialize them with a lock. Therefore this is best used
// for larger LAB allocations only.
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

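// Returns whether the block starting at p is an object that should be
// scanned. If p lies outside this region we must be in a continues-humongous
// region, where the only valid object start is the bottom of the
// corresponding starts-humongous region.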
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

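// Returns the size, in HeapWords, of the block starting at addr: the
// remaining unallocated tail if addr == top(), the object's size if the
// block is a parsable object, or otherwise the distance to the next live
// object according to the previous marking bitmap.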
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if G1 Class Unloading isn't used. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes, so we cannot rely on
  // the dead oop's size to step to the next live object. Instead, find the
  // next live object via the previous marking bitmap.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

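// Allocation variants that skip block offset table (BOT) updates. These may
// only be used for young regions, which never need to be looked up through
// the BOT, as the asserts below check.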
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

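// Called when a concurrent marking cycle starts: resets the per-region count
// of bytes marked in the current cycle and records the current top as the
// "next top at mark start" (NTAMS).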
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

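// Called when marking completes: the just-finished cycle's NTAMS and marked
// byte count become the "previous" values used by later liveness queries.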
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom,NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

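// Returns whether this region is currently part of the collection set.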
inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP