/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/sparsePRT.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

// Check that the size of SparsePRTEntry is evenly divisible by the size of
// its largest member type, to avoid SIGBUS when accessing entries.
STATIC_ASSERT(sizeof(SparsePRTEntry) % sizeof(int) == 0);

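// Set up this entry for "region_ind" with no cards recorded yet: the next
// chain index is cleared and the first free card slot is slot 0.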
void SparsePRTEntry::init(RegionIdx_t region_ind) {
  // Check that the card array element type can represent all cards in the region.
  // Choose a larger SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required.
  assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) *
         G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition");
  assert(G1RSetSparseRegionEntries > 0, "precondition");
  _region_ind = region_ind;
  _next_index = RSHashTable::NullEntry;
  _next_null = 0;
}

bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
  for (int i = 0; i < num_valid_cards(); i++) {
    if (card(i) == card_index) {
      return true;
    }
  }
  return false;
}

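// Linear scan over the valid cards: return "found" if "card_index" is
// already present, "added" after storing it in the next free slot, or
// "overflow" if this entry has no room left.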
SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
  for (int i = 0; i < num_valid_cards(); i++) {
    if (card(i) == card_index) {
      return found;
    }
  }
  if (num_valid_cards() < cards_num() - 1) {
    _cards[_next_null] = (card_elem_t)card_index;
    _next_null++;
    return added;
  }
  // Otherwise, we're full.
  return overflow;
}

void SparsePRTEntry::copy_cards(card_elem_t* cards) const {
  memcpy(cards, _cards, cards_num() * sizeof(card_elem_t));
}

void SparsePRTEntry::copy_cards(SparsePRTEntry* e) const {
  copy_cards(e->_cards);
  assert(_next_null >= 0, "invariant");
  assert(_next_null <= cards_num(), "invariant");
  e->_next_null = _next_null;
}

// ----------------------------------------------------------------------

float RSHashTable::TableOccupancyFactor = 0.5f;

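// "capacity" is used as a power-of-two bucket count ("capacity - 1" serves
// as the hash mask). The entry pool holds capacity * TableOccupancyFactor + 1
// entries, so entry allocation runs out well before every bucket is used.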
RSHashTable::RSHashTable(size_t capacity) :
  _capacity(capacity), _capacity_mask(capacity-1),
  _occupied_entries(0), _occupied_cards(0),
  _entries(NULL),
  _buckets(NEW_C_HEAP_ARRAY(int, capacity, mtGC)),
  _free_list(NullEntry), _free_region(0)
{
  _num_entries = (capacity * TableOccupancyFactor) + 1;
  _entries = (SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, _num_entries * SparsePRTEntry::size(), mtGC);
  clear();
}

RSHashTable::~RSHashTable() {
  if (_entries != NULL) {
    FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries);
    _entries = NULL;
  }
  if (_buckets != NULL) {
    FREE_C_HEAP_ARRAY(int, _buckets);
    _buckets = NULL;
  }
}

void RSHashTable::clear() {
  _occupied_entries = 0;
  _occupied_cards = 0;
  guarantee(_entries != NULL, "INV");
  guarantee(_buckets != NULL, "INV");

  guarantee(_capacity <= ((size_t)1 << (sizeof(int)*BitsPerByte-1)) - 1,
            "_capacity too large");

  // This will put -1 == NullEntry in the key field of all entries.
  memset(_entries, NullEntry, _num_entries * SparsePRTEntry::size());
  memset(_buckets, NullEntry, _capacity * sizeof(int));
  _free_list = NullEntry;
  _free_region = 0;
}

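// Find or create the entry for "region_ind" and record "card_index" in it.
// Returns false only if the card could not be added because the per-region
// entry overflowed.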
bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
  SparsePRTEntry* e = entry_for_region_ind_create(region_ind);
  assert(e != NULL && e->r_ind() == region_ind,
         "Postcondition of call above.");
  SparsePRTEntry::AddCardResult res = e->add_card(card_index);
  if (res == SparsePRTEntry::added) _occupied_cards++;
  assert(e->num_valid_cards() > 0, "Postcondition");
  return res != SparsePRTEntry::overflow;
}

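// Hash "region_ind" into a bucket and walk that bucket's chain of entry
// indices until an entry for the region is found; returns NULL otherwise.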
SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) const {
  int ind = (int) (region_ind & capacity_mask());
  int cur_ind = _buckets[ind];
  SparsePRTEntry* cur;
  while (cur_ind != NullEntry &&
         (cur = entry(cur_ind))->r_ind() != region_ind) {
    cur_ind = cur->next_index();
  }

  if (cur_ind == NullEntry) return NULL;
  // Otherwise...
  assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
  assert(cur->num_valid_cards() > 0, "Inv");
  return cur;
}

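// Unlink the entry for "region_ind" from its bucket chain, return it to the
// free list and update the occupancy counters. Returns false if there is no
// entry for the region.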
bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
  int ind = (int) (region_ind & capacity_mask());
  int* prev_loc = &_buckets[ind];
  int cur_ind = *prev_loc;
  SparsePRTEntry* cur;
  while (cur_ind != NullEntry &&
         (cur = entry(cur_ind))->r_ind() != region_ind) {
    prev_loc = cur->next_index_addr();
    cur_ind = *prev_loc;
  }

  if (cur_ind == NullEntry) return false;
  // Otherwise, splice out "cur".
  *prev_loc = cur->next_index();
  _occupied_cards -= cur->num_valid_cards();
  free_entry(cur_ind);
  _occupied_entries--;
  return true;
}

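// Like get_entry(), but when the region is not yet present a fresh entry is
// allocated, initialized and inserted at the front of its bucket chain.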
SparsePRTEntry*
RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
  SparsePRTEntry* res = get_entry(region_ind);
  if (res == NULL) {
    int new_ind = alloc_entry();
    res = entry(new_ind);
    res->init(region_ind);
    // Insert at front.
    int ind = (int) (region_ind & capacity_mask());
    res->set_next_index(_buckets[ind]);
    _buckets[ind] = new_ind;
    _occupied_entries++;
  }
  return res;
}

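// Hand out an entry index, preferring the free list and otherwise carving
// the next unused slot out of the entry array. NullEntry indicates that the
// table is full.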
int RSHashTable::alloc_entry() {
  int res;
  if (_free_list != NullEntry) {
    res = _free_list;
    _free_list = entry(res)->next_index();
    return res;
  } else if ((size_t)_free_region < _num_entries) {
    res = _free_region;
    _free_region++;
    return res;
  } else {
    return NullEntry;
  }
}

void RSHashTable::free_entry(int fi) {
  entry(fi)->set_next_index(_free_list);
  _free_list = fi;
}

void RSHashTable::add_entry(SparsePRTEntry* e) {
  assert(e->num_valid_cards() > 0, "Precondition.");
  SparsePRTEntry* e2 = entry_for_region_ind_create(e->r_ind());
  e->copy_cards(e2);
  _occupied_cards += e2->num_valid_cards();
  assert(e2->num_valid_cards() > 0, "Postcondition.");
}

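// Starting from the current bucket-chain position (_bl_ind), skip over
// entries with no valid cards and return the first card of the first
// non-empty entry, or NoCardFound if the chain is exhausted.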
CardIdx_t RSHashTableIter::find_first_card_in_list() {
  while (_bl_ind != RSHashTable::NullEntry) {
    SparsePRTEntry* sparse_entry = _rsht->entry(_bl_ind);
    if (sparse_entry->num_valid_cards() > 0) {
      return sparse_entry->card(0);
    } else {
      _bl_ind = sparse_entry->next_index();
    }
  }
  // Otherwise, none found:
  return NoCardFound;
}

size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
}

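// Advance the iterator: first through the remaining cards of the current
// entry, then along the rest of the current bucket chain, and finally
// across the remaining buckets. On success the card index produced by
// compute_card_ind() is stored in "card_index".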
bool RSHashTableIter::has_next(size_t& card_index) {
  _card_ind++;
  if (_bl_ind >= 0) {
    SparsePRTEntry* e = _rsht->entry(_bl_ind);
    if (_card_ind < e->num_valid_cards()) {
      CardIdx_t ci = e->card(_card_ind);
      card_index = compute_card_ind(ci);
      return true;
    }
  }

  // Otherwise, must find the next valid entry.
  _card_ind = 0;

  if (_bl_ind != RSHashTable::NullEntry) {
    _bl_ind = _rsht->entry(_bl_ind)->next_index();
    CardIdx_t ci = find_first_card_in_list();
    if (ci != NoCardFound) {
      card_index = compute_card_ind(ci);
      return true;
    }
  }
  // If we didn't return above, must go to the next non-null table index.
  _tbl_ind++;
  while ((size_t)_tbl_ind < _rsht->capacity()) {
    _bl_ind = _rsht->_buckets[_tbl_ind];
    CardIdx_t ci = find_first_card_in_list();
    if (ci != NoCardFound) {
      card_index = compute_card_ind(ci);
      return true;
    }
    // Otherwise, try next entry.
    _tbl_ind++;
  }
  // Otherwise, there are no more entries.
  return false;
}

bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
  SparsePRTEntry* e = get_entry(region_index);
  return (e != NULL && e->contains_card(card_index));
}

size_t RSHashTable::mem_size() const {
  return sizeof(RSHashTable) +
    _num_entries * (SparsePRTEntry::size() + sizeof(int));
}

// ----------------------------------------------------------------------

SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;

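// Push "sprt" onto the global list of expanded tables with a CAS loop. A
// table may expand several times in a pause, but the _expanded flag makes
// sure it is only linked into the list once.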
void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
  // We could expand multiple times in a pause -- only put on list once.
  if (sprt->expanded()) return;
  sprt->set_expanded(true);
  SparsePRT* hd = _head_expanded_list;
  while (true) {
    sprt->_next_expanded = hd;
    SparsePRT* res =
      (SparsePRT*)
      Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
    if (res == hd) return;
    else hd = res;
  }
}

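// Pop the head of the expanded list with a CAS loop, returning NULL once
// the list is empty; cleanup_all() uses this to drain the list.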
SparsePRT* SparsePRT::get_from_expanded_list() {
  SparsePRT* hd = _head_expanded_list;
  while (hd != NULL) {
    SparsePRT* next = hd->next_expanded();
    SparsePRT* res =
      (SparsePRT*)
      Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
    if (res == hd) {
      hd->set_next_expanded(NULL);
      return hd;
    } else {
      hd = res;
    }
  }
  return NULL;
}

void SparsePRT::reset_for_cleanup_tasks() {
  _head_expanded_list = NULL;
}

void SparsePRT::do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task) {
  if (should_be_on_expanded_list()) {
    sprt_cleanup_task->add(this);
  }
}

void SparsePRT::finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task) {
  assert(ParGCRareEvent_lock->owned_by_self(), "pre-condition");
  SparsePRT* head = sprt_cleanup_task->head();
  SparsePRT* tail = sprt_cleanup_task->tail();
  if (head != NULL) {
    assert(tail != NULL, "if head is not NULL, tail should not be NULL either");

    tail->set_next_expanded(_head_expanded_list);
    _head_expanded_list = head;
  } else {
    assert(tail == NULL, "if head is NULL, tail should be NULL too");
  }
}

bool SparsePRT::should_be_on_expanded_list() {
  if (_expanded) {
    assert(_cur != _next, "if _expanded is true, _cur should be != _next");
  } else {
    assert(_cur == _next, "if _expanded is false, _cur should be == _next");
  }
  return expanded();
}

void SparsePRT::cleanup_all() {
  // First clean up all expanded tables so they agree on _next and _cur.
  SparsePRT* sprt = get_from_expanded_list();
  while (sprt != NULL) {
    sprt->cleanup();
    sprt = get_from_expanded_list();
  }
}

SparsePRT::SparsePRT(HeapRegion* hr) :
  _hr(hr), _expanded(false), _next_expanded(NULL)
{
  _cur = new RSHashTable(InitialCapacity);
  _next = _cur;
}

SparsePRT::~SparsePRT() {
  assert(_next != NULL && _cur != NULL, "Inv");
  if (_cur != _next) { delete _cur; }
  delete _next;
}

size_t SparsePRT::mem_size() const {
  // We ignore "_cur" here, because it either equals _next, or else it is
  // on the deleted list.
  return sizeof(SparsePRT) + _next->mem_size();
}

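// Cards are always added to _next. If _next reports that it should expand,
// a table of twice the capacity is installed first; see expand() for how
// the old table is retired.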
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
  if (_next->should_expand()) {
    expand();
  }
  return _next->add_card(region_id, card_index);
}

SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
  return _next->get_entry(region_id);
}

bool SparsePRT::delete_entry(RegionIdx_t region_id) {
  return _next->delete_entry(region_id);
}

void SparsePRT::clear() {
  // If they differ, _next is bigger than _cur, so _next has no chance of
  // being the initial size.
  if (_next != _cur) {
    delete _next;
  }

  if (_cur->capacity() != InitialCapacity) {
    delete _cur;
    _cur = new RSHashTable(InitialCapacity);
  } else {
    _cur->clear();
  }
  _next = _cur;
  _expanded = false;
}

void SparsePRT::cleanup() {
  // Make sure that the current and next tables agree.
  if (_cur != _next) {
    delete _cur;
  }
  _cur = _next;
  set_expanded(false);
}

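// Replace _next with a table of twice the capacity and re-add every valid
// entry from the old table. The old table is freed right away unless it is
// still referenced as _cur, in which case cleanup() disposes of it later.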
void SparsePRT::expand() {
  RSHashTable* last = _next;
  _next = new RSHashTable(last->capacity() * 2);
  for (size_t i = 0; i < last->num_entries(); i++) {
    SparsePRTEntry* e = last->entry((int)i);
    if (e->valid_entry()) {
      _next->add_entry(e);
    }
  }
  if (last != _cur) {
    delete last;
  }
  add_to_expanded_list(this);
}

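// Append "sprt" to this task's local list. finish_cleanup_task() later
// splices the whole local list onto the global expanded list while holding
// ParGCRareEvent_lock.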
void SparsePRTCleanupTask::add(SparsePRT* sprt) {
  assert(sprt->should_be_on_expanded_list(), "pre-condition");

  sprt->set_next_expanded(NULL);
  if (_tail != NULL) {
    _tail->set_next_expanded(sprt);
  } else {
    _head = sprt;
  }
  _tail = sprt;
}