/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

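// Sanity bound used by FreeRegionList::verify_list(): walking the list past
// this many regions is treated as evidence of a cycle or list corruption.
// It is set exactly once, via set_unrealistically_long_length().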
uint FreeRegionList::_unrealistically_long_length = 0;

#ifndef PRODUCT
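// Check that a single region is in a state consistent with this set:
// it points back to this set as its containing set, is not young, and its
// humongous / free / empty state matches what the set expects.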
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
  assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index());
  assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions
  assert(hr->is_humongous() == regions_humongous(), "Wrong humongous state for region %u and set %s", hr->hrm_index(), name());
  assert(hr->is_free() == regions_free(), "Wrong free state for region %u and set %s", hr->hrm_index(), name());
  assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name());
  assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
         "Empty region %u is not free or archive for set %s", hr->hrm_index(), name());
}
#endif // PRODUCT

void HeapRegionSetBase::verify() {
  // It's important that we also observe the MT safety protocol even
  // for the verification calls. If we do verification without the
  // appropriate locks and the set changes underneath our feet,
  // verification might fail and send us on a wild goose chase.
  check_mt_safety();

  guarantee_heap_region_set(( is_empty() && length() == 0) ||
                            (!is_empty() && length() > 0),
                            "invariant");
}

void HeapRegionSetBase::verify_start() {
  // See comment in verify() about MT safety and verification.
  check_mt_safety();
  assert_heap_region_set(!_verify_in_progress, "verification should not be in progress");

  // Do the basic verification first before we do the checks over the regions.
  HeapRegionSetBase::verify();

  _verify_in_progress = true;
}

void HeapRegionSetBase::verify_end() {
  // See comment in verify() about MT safety and verification.
  check_mt_safety();
  assert_heap_region_set(_verify_in_progress, "verification should be in progress");

  _verify_in_progress = false;
}

void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
  out->cr();
  out->print_cr("Set: %s (" PTR_FORMAT ")", name(), p2i(this));
  out->print_cr("  Region Assumptions");
  out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
  out->print_cr("    free              : %s", BOOL_TO_STR(regions_free()));
  out->print_cr("  Attributes");
  out->print_cr("    length            : %14u", length());
}

HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
  : _name(name), _verify_in_progress(false),
    _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker),
    _length(0)
{ }

void FreeRegionList::set_unrealistically_long_length(uint len) {
  guarantee(_unrealistically_long_length == 0, "should only be set once");
  _unrealistically_long_length = len;
}

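// Detach every region from the list: each region's next, prev and
// containing-set pointers are cleared before the list's own bookkeeping
// (_head, _tail, _last, _length) is reset via clear().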
void FreeRegionList::remove_all() {
  check_mt_safety();
  verify_optional();

  HeapRegion* curr = _head;
  while (curr != NULL) {
    verify_region(curr);

    HeapRegion* next = curr->next();
    curr->set_next(NULL);
    curr->set_prev(NULL);
    curr->set_containing_set(NULL);
    curr = next;
  }
  clear();

  verify_optional();
}

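// Move all regions from from_list into this list, keeping the result sorted
// by hrm_index. Both lists are expected to already be ordered; from_list is
// left empty afterwards.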
void FreeRegionList::add_ordered(FreeRegionList* from_list) {
  check_mt_safety();
  from_list->check_mt_safety();

  verify_optional();
  from_list->verify_optional();

  if (from_list->is_empty()) {
    return;
  }

  #ifdef ASSERT
  FreeRegionListIterator iter(from_list);
  while (iter.more_available()) {
    HeapRegion* hr = iter.get_next();
    // In set_containing_set() we check that we either set the value
    // from NULL to non-NULL or vice versa to catch bugs. So, we have
    // to NULL it first before setting it to the value.
    hr->set_containing_set(NULL);
    hr->set_containing_set(this);
  }
  #endif // ASSERT

  if (is_empty()) {
    assert_free_region_list(length() == 0 && _tail == NULL, "invariant");
    _head = from_list->_head;
    _tail = from_list->_tail;
  } else {
    HeapRegion* curr_to = _head;
    HeapRegion* curr_from = from_list->_head;

    while (curr_from != NULL) {
      while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) {
        curr_to = curr_to->next();
      }

      if (curr_to == NULL) {
        // The rest of the from list should be added as tail
        _tail->set_next(curr_from);
        curr_from->set_prev(_tail);
        curr_from = NULL;
      } else {
        HeapRegion* next_from = curr_from->next();

        curr_from->set_next(curr_to);
        curr_from->set_prev(curr_to->prev());
        if (curr_to->prev() == NULL) {
          _head = curr_from;
        } else {
          curr_to->prev()->set_next(curr_from);
        }
        curr_to->set_prev(curr_from);

        curr_from = next_from;
      }
    }

    if (_tail->hrm_index() < from_list->_tail->hrm_index()) {
      _tail = from_list->_tail;
    }
  }

  _length += from_list->length();
  from_list->clear();

  verify_optional();
  from_list->verify_optional();
}

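// Unlink num_regions regions from the list, starting at first and following
// the next() links, fixing up _head, _tail and the cached _last pointer as
// needed.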
void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
  check_mt_safety();
  assert_free_region_list(num_regions >= 1, "pre-condition");
  assert_free_region_list(!is_empty(), "pre-condition");

  verify_optional();
  DEBUG_ONLY(uint old_length = length();)

  HeapRegion* curr = first;
  uint count = 0;
  while (count < num_regions) {
    verify_region(curr);
    HeapRegion* next = curr->next();
    HeapRegion* prev = curr->prev();

    assert(count < num_regions,
           "[%s] should not come across more regions "
           "pending for removal than num_regions: %u",
           name(), num_regions);

    if (prev == NULL) {
      assert_free_region_list(_head == curr, "invariant");
      _head = next;
    } else {
      assert_free_region_list(_head != curr, "invariant");
      prev->set_next(next);
    }
    if (next == NULL) {
      assert_free_region_list(_tail == curr, "invariant");
      _tail = prev;
    } else {
      assert_free_region_list(_tail != curr, "invariant");
      next->set_prev(prev);
    }
    if (_last == curr) {
      _last = NULL;
    }

    curr->set_next(NULL);
    curr->set_prev(NULL);
    remove(curr);

    count++;
    curr = next;
  }

  assert(count == num_regions,
         "[%s] count: %u should be == num_regions: %u",
         name(), count, num_regions);
  assert(length() + num_regions == old_length,
         "[%s] new length should be consistent "
         "new length: %u old length: %u num_regions: %u",
         name(), length(), old_length, num_regions);

  verify_optional();
}

void FreeRegionList::verify() {
  // See comment in HeapRegionSetBase::verify() about MT safety and
  // verification.
  check_mt_safety();

  // This will also do the basic verification.
  verify_start();

  verify_list();

  verify_end();
}

void FreeRegionList::clear() {
  _length = 0;
  _head = NULL;
  _tail = NULL;
  _last = NULL;
}

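// Walk the list from _head, verifying each region, the prev/next link
// consistency, the ascending hrm_index order, and that the recorded length
// matches the number of regions visited. prev0 and prev1 remember the last
// two regions seen, for diagnostics if a cycle is suspected.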
void FreeRegionList::verify_list() {
  HeapRegion* curr = _head;
  HeapRegion* prev1 = NULL;
  HeapRegion* prev0 = NULL;
  uint count = 0;
  size_t capacity = 0;
  uint last_index = 0;

  guarantee(_head == NULL || _head->prev() == NULL, "_head should not have a prev");
  while (curr != NULL) {
    verify_region(curr);

    count++;
    guarantee(count < _unrealistically_long_length,
              "[%s] the calculated length: %u seems very long, is there maybe a cycle? curr: " PTR_FORMAT " prev0: " PTR_FORMAT " prev1: " PTR_FORMAT " length: %u",
              name(), count, p2i(curr), p2i(prev0), p2i(prev1), length());

    if (curr->next() != NULL) {
      guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
    }
    guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted");
    last_index = curr->hrm_index();

    capacity += curr->capacity();

    prev1 = prev0;
    prev0 = curr;
    curr = curr->next();
  }

  guarantee(_tail == prev0, "Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index());
  guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
  guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
}

// Note on the check_mt_safety() methods below:
//
// Verification of the "master" heap region sets / lists that are
// maintained by G1CollectedHeap is always done during a STW pause and
// by the VM thread at the start / end of the pause. The standard
// verification methods all assert check_mt_safety(). This is
// important as it ensures that verification is done without
// concurrent updates taking place at the same time. It follows that,
// for the "master" heap region sets / lists, the check_mt_safety()
// method should include the VM thread / STW case.

void MasterFreeRegionListMtSafeChecker::check() {
  // Master Free List MT safety protocol:
  // (a) If we're at a safepoint, operations on the master free list
  // should be invoked by either the VM thread (which will serialize
  // them) or by the GC workers while holding the
  // FreeList_lock.
  // (b) If we're not at a safepoint, operations on the master free
  // list should be invoked while holding the Heap_lock.

  if (SafepointSynchronize::is_at_safepoint()) {
    guarantee(Thread::current()->is_VM_thread() ||
              FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
  } else {
    guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
  }
}

void SecondaryFreeRegionListMtSafeChecker::check() {
  // Secondary Free List MT safety protocol:
  // Operations on the secondary free list should always be invoked
  // while holding the SecondaryFreeList_lock.

  guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
}

void OldRegionSetMtSafeChecker::check() {
  // Master Old Set MT safety protocol:
  // (a) If we're at a safepoint, operations on the master old set
  // should be invoked:
  // - by the VM thread (which will serialize them), or
  // - by the GC workers while holding the FreeList_lock, if we're
  //   at a safepoint for an evacuation pause (this lock is taken
  //   anyway when a GC alloc region is retired so that a new one
  //   is allocated from the free list), or
  // - by the GC workers while holding the OldSets_lock, if we're at a
  //   safepoint for a cleanup pause.
  // (b) If we're not at a safepoint, operations on the master old set
  // should be invoked while holding the Heap_lock.

  if (SafepointSynchronize::is_at_safepoint()) {
    guarantee(Thread::current()->is_VM_thread()
        || FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
        "master old set MT safety protocol at a safepoint");
  } else {
    guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
  }
}

void HumongousRegionSetMtSafeChecker::check() {
  // Humongous Set MT safety protocol:
  // (a) If we're at a safepoint, operations on the master humongous
  // set should be invoked by either the VM thread (which will
  // serialize them) or by the GC workers while holding the
  // OldSets_lock.
  // (b) If we're not at a safepoint, operations on the master
  // humongous set should be invoked while holding the Heap_lock.

  if (SafepointSynchronize::is_at_safepoint()) {
    guarantee(Thread::current()->is_VM_thread() ||
              OldSets_lock->owned_by_self(),
              "master humongous set MT safety protocol at a safepoint");
  } else {
    guarantee(Heap_lock->owned_by_self(),
              "master humongous set MT safety protocol outside a safepoint");
  }
}