// heapRegionRemSet.cpp revision 9244:825cee2cd7a6
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
39
// A PerRegionTable (PRT) records which cards of a single "from" region
// contain pointers into the region owned by the enclosing remembered set.
// It is a bitmap with one bit per card of the "from" region, plus the list
// links needed by OtherRegionsTable: a hash-bucket collision chain, a
// doubly linked list of all allocated fine-grain tables, and a global
// lock-free free list used to recycle instances.
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;        // The "from" region covered by this table.
  BitMap          _bm;        // One bit per card of _hr.
  jint            _occupied;  // Cached number of set bits in _bm.

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  // Recompute _occupied from the bitmap, e.g. after a bulk update such as
  // scrub() that may have cleared an unknown number of bits.
  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  // Set the bit for from_card and bump _occupied. When "par" is true the
  // bit set and counter increment are performed atomically so multiple
  // threads may add cards concurrently; _occupied is only incremented by
  // the thread whose par_at_put actually flipped the bit.
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  // Record a reference from location "from" by setting the bit of the card
  // containing "from", provided "from" still lies within this table's
  // region (see the robustness comment below).
  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                             p2i(from),
                             UseCompressedOops
                             ? p2i(oopDesc::load_decode_heap_oop((narrowOop*)from))
                             : p2i(oopDesc::load_decode_heap_oop((oop*)from)));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation.  This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this. Thus, we call is_in_reserved_raw()
    // instead of just is_in_reserved() here.
    if (loc_hr->is_in_reserved_raw(from)) {
      // Word offset within the region, converted to a card index.
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return _hr; }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  // Re-initialize this table for region "hr": clears the bitmap, the
  // occupancy count and the collision link. The 'all'-list links are only
  // cleared on request, since an evicted-and-reused table stays on that
  // list (see OtherRegionsTable::add_reference()).
  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _hr = hr;
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
  }

  // Thread-safe variant; may be called by several threads concurrently.
  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  // Single-threaded variant; caller must guarantee exclusive access.
  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  // Keep only the cards whose bit is also set in "card_bm" (offset by this
  // region's first card index), then refresh _occupied.
  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  // Thread-safe card add.
  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  // Single-threaded card add.
  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field. Lock-free: the whole chain is
  // pushed onto the global free list with a CAS retry loop.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  // Free a single PRT (degenerate one-element bulk_free).
  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance. Pops from the global
  // free list via a CAS retry loop when possible; otherwise heap-allocates
  // a fresh table.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        // Successfully popped "fl"; recycle it for "hr".
        fl->init(hr, true);
        return fl;
      } else {
        // Lost the race; reload the head and retry.
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  // Total mem_size() of all tables currently on the global free list.
  // NOTE(review): walks the list without synchronization — presumably only
  // called when the list is stable; confirm at call sites.
  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};
256
// Head of the global lock-free free list of recycled PRTs.
PerRegionTable* PerRegionTable::_free_list = NULL;

// Class-wide sizing/eviction parameters, lazily initialized by the first
// OtherRegionsTable constructed (see the constructor below).
size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
263
// Construct the "other regions" part of the remembered set for region "hr".
// "m" protects the locked slow path of add_reference(). The first instance
// created also initializes the class-wide sizing parameters.
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  // Lazy one-time initialization of the static sizing parameters.
  // NOTE(review): unsynchronized check-then-set — presumably the first
  // tables are created during single-threaded heap setup; confirm.
  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    // Round G1RSetRegionEntries down to a power of two so bucket selection
    // can use masking instead of modulo.
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  // Allocation failure is fatal; report it explicitly rather than crash on
  // a NULL dereference later.
  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  // All hash buckets start out empty.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}
301
302void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
303  // We always append to the beginning of the list for convenience;
304  // the order of entries in this list does not matter.
305  if (_first_all_fine_prts != NULL) {
306    assert(_first_all_fine_prts->prev() == NULL, "invariant");
307    _first_all_fine_prts->set_prev(prt);
308    prt->set_next(_first_all_fine_prts);
309  } else {
310    // this is the first element we insert. Adjust the "last" pointer
311    _last_all_fine_prts = prt;
312    assert(prt->next() == NULL, "just checking");
313  }
314  // the new element is always the first element without a predecessor
315  prt->set_prev(NULL);
316  _first_all_fine_prts = prt;
317
318  assert(prt->prev() == NULL, "just checking");
319  assert(_first_all_fine_prts == prt, "just checking");
320  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
321         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
322         "just checking");
323  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
324         "just checking");
325  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
326         "just checking");
327}
328
329void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
330  if (prt->prev() != NULL) {
331    assert(_first_all_fine_prts != prt, "just checking");
332    prt->prev()->set_next(prt->next());
333    // removing the last element in the list?
334    if (_last_all_fine_prts == prt) {
335      _last_all_fine_prts = prt->prev();
336    }
337  } else {
338    assert(_first_all_fine_prts == prt, "just checking");
339    _first_all_fine_prts = prt->next();
340    // list is empty now?
341    if (_first_all_fine_prts == NULL) {
342      _last_all_fine_prts = NULL;
343    }
344  }
345
346  if (prt->next() != NULL) {
347    prt->next()->set_prev(prt->prev());
348  }
349
350  prt->set_next(NULL);
351  prt->set_prev(NULL);
352
353  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
354         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
355         "just checking");
356  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
357         "just checking");
358  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
359         "just checking");
360}
361
// Per-(parallel-remset, region) cache of the last card added; used by
// OtherRegionsTable::add_reference() to filter duplicate card insertions.
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
365
// Allocate the n_par_rs x max_num_regions cache (backed by a padded 2D
// array) and invalidate every entry. Must be called exactly once.
void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  // Start with no cached cards anywhere.
  invalidate(0, _max_regions);
}
376
377void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
378  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
379            "Trying to invalidate beyond maximum region, from %u size " SIZE_FORMAT,
380            start_idx, new_num_regions);
381  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
382    uint end_idx = (start_idx + (uint)new_num_regions);
383    assert(end_idx <= _max_regions, "Must be within max.");
384    for (uint j = start_idx; j < end_idx; j++) {
385      set(i, j, InvalidCard);
386    }
387  }
388}
389
390#ifndef PRODUCT
// Debug-only dump of the entire from-card cache, one line per entry.
void FromCardCache::print(outputStream* out) {
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    for (uint j = 0; j < _max_regions; j++) {
      out->print_cr("_from_card_cache[%u][%u] = %d.",
                    i, j, at(i, j));
    }
  }
}
399#endif
400
401void FromCardCache::clear(uint region_idx) {
402  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
403  for (uint i = 0; i < num_par_remsets; i++) {
404    set(i, region_idx, InvalidCard);
405  }
406}
407
// Record that location "from" (in some other region) holds a reference into
// this table's region. "tid" selects the caller's row in the from-card
// cache. Fast paths: (1) the from-card cache already holds this card,
// (2) the "from" region is already coarsened, (3) a fine-grain table for
// the "from" region exists and can be updated lock-free. Otherwise, under
// the lock, the card goes into the sparse table; on sparse overflow it is
// promoted to a fine-grain table, evicting (coarsening) an existing one
// via delete_region_table() when the fine-grain table is full.
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                    p2i(from),
                                                    UseCompressedOops
                                                    ? p2i(oopDesc::load_decode_heap_oop((narrowOop*)from))
                                                    : p2i(oopDesc::load_decode_heap_oop((oop*)from)));
  }

  // Global card index of the card containing "from".
  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
                  p2i(_hr->bottom()), from_card,
                  FromCardCache::at(tid, cur_hrm_ind));
  }

  // Fast path 1: this thread added the same card last time.
  if (FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  from-card cache hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  coarse map hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    // Slow path: take the lock and re-check before creating state.
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      // Card index relative to the start of the "from" region.
      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      // Try the sparse table first; it is the cheapest representation.
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(_hr, from);
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                p2i(_hr->bottom()), p2i(from));
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   added card to sparse table.");
        }
        assert(contains_reference_locked(from), "We just added it!");
        return;
      } else {
        // Sparse entry overflowed (or sparse tables disabled): fall
        // through and promote to a fine-grain table.
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   [tid %u] sparse table entry "
                        "overflow(f: %d, t: %u)",
                        tid, from_hrm_ind, cur_hrm_ind);
        }
      }

      if (_n_fine_entries == _max_fine_entries) {
        // Fine-grain table is full: evict (and coarsen) a victim and
        // reuse its PRT for the new region.
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);

  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(_hr, from);
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          p2i(_hr->bottom()), p2i(from));
    }
  }
  assert(contains_reference(from), "We just added it!");
}
547
548PerRegionTable*
549OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
550  assert(ind < _max_fine_entries, "Preconditions.");
551  PerRegionTable* prt = _fine_grain_regions[ind];
552  while (prt != NULL && prt->hr() != hr) {
553    prt = prt->collision_list_next();
554  }
555  // Loop postcondition is the method postcondition.
556  return prt;
557}
558
// Global count of coarsening events, bumped in delete_region_table().
jint OtherRegionsTable::_n_coarsenings = 0;
560
561PerRegionTable* OtherRegionsTable::delete_region_table() {
562  assert(_m->owned_by_self(), "Precondition");
563  assert(_n_fine_entries == _max_fine_entries, "Precondition");
564  PerRegionTable* max = NULL;
565  jint max_occ = 0;
566  PerRegionTable** max_prev = NULL;
567  size_t max_ind;
568
569  size_t i = _fine_eviction_start;
570  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
571    size_t ii = i;
572    // Make sure we get a non-NULL sample.
573    while (_fine_grain_regions[ii] == NULL) {
574      ii++;
575      if (ii == _max_fine_entries) ii = 0;
576      guarantee(ii != i, "We must find one.");
577    }
578    PerRegionTable** prev = &_fine_grain_regions[ii];
579    PerRegionTable* cur = *prev;
580    while (cur != NULL) {
581      jint cur_occ = cur->occupied();
582      if (max == NULL || cur_occ > max_occ) {
583        max = cur;
584        max_prev = prev;
585        max_ind = i;
586        max_occ = cur_occ;
587      }
588      prev = cur->collision_list_next_addr();
589      cur = cur->collision_list_next();
590    }
591    i = i + _fine_eviction_stride;
592    if (i >= _n_fine_entries) i = i - _n_fine_entries;
593  }
594
595  _fine_eviction_start++;
596
597  if (_fine_eviction_start >= _n_fine_entries) {
598    _fine_eviction_start -= _n_fine_entries;
599  }
600
601  guarantee(max != NULL, "Since _n_fine_entries > 0");
602  guarantee(max_prev != NULL, "Since max != NULL.");
603
604  // Set the corresponding coarse bit.
605  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
606  if (!_coarse_map.at(max_hrm_index)) {
607    _coarse_map.at_put(max_hrm_index, true);
608    _n_coarse_entries++;
609    if (G1TraceHeapRegionRememberedSet) {
610      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
611                 "for region [" PTR_FORMAT "...] (" SIZE_FORMAT " coarse entries).\n",
612                 p2i(_hr->bottom()),
613                 p2i(max->hr()->bottom()),
614                 _n_coarse_entries);
615    }
616  }
617
618  // Unsplice.
619  *max_prev = max->collision_list_next();
620  Atomic::inc(&_n_coarsenings);
621  _n_fine_entries--;
622  return max;
623}
624
// Drop remembered-set entries that refer to dead regions or cards:
// intersect the coarse map with "region_bm", delete fine-grain tables
// whose region bit is clear, intersect the surviving tables' card bitmaps
// with "card_bm" (deleting tables that become empty), and finally
// invalidate the from-card cache.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print("   Coarse map: before = " SIZE_FORMAT "...",
                        _n_coarse_entries);
  }
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("   after = " SIZE_FORMAT ".", _n_coarse_entries);
  }

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    // "prev" tracks the link pointing at "cur" so dead tables can be
    // unspliced from the collision chain in place.
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr("     For other region %u:",
                               cur->hr()->hrm_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          // Table survives: advance "prev" past it.
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}
690
691bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
692  if (limit <= (size_t)G1RSetSparseRegionEntries) {
693    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
694  } else {
695    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
696    // for the limit. The solution, comparing against occupied() would be too slow
697    // at this time.
698    Unimplemented();
699    return false;
700  }
701}
702
703bool OtherRegionsTable::is_empty() const {
704  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
705}
706
707size_t OtherRegionsTable::occupied() const {
708  size_t sum = occ_fine();
709  sum += occ_sparse();
710  sum += occ_coarse();
711  return sum;
712}
713
714size_t OtherRegionsTable::occ_fine() const {
715  size_t sum = 0;
716
717  size_t num = 0;
718  PerRegionTable * cur = _first_all_fine_prts;
719  while (cur != NULL) {
720    sum += cur->occupied();
721    cur = cur->next();
722    num++;
723  }
724  guarantee(num == _n_fine_entries, "just checking");
725  return sum;
726}
727
// Cards represented by coarse entries: each coarsened region contributes
// all of its cards.
size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}
731
// Cards recorded in the sparse table.
size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}
735
// Memory footprint in bytes: fine-grain tables, hash bucket array, coarse
// bitmap, sparse table and this object itself.
size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}
750
// Memory used by static data shared by all tables (the from-card cache).
size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}
754
// Memory held by the global PerRegionTable free list.
size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}
758
// Invalidate every from-card cache entry referring to this region.
void OtherRegionsTable::clear_fcc() {
  FromCardCache::clear(_hr->hrm_index());
}
762
// Return the table to its empty state. All fine-grain tables are recycled
// to the global free list in one bulk operation, using the 'all' list as
// the pre-linked chain.
void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    // Empty every hash bucket in one go.
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}
781
// Thread-safe query: takes the lock and delegates to the locked variant.
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}
787
// Query whether "from" is recorded, checking in order: the coarse map, the
// fine-grain table for the "from" region, and finally the sparse table.
// Callers hold _m (see contains_reference() and the locked path of
// add_reference()).
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    // No fine-grain table: compute the card index relative to the start of
    // the "from" region and consult the sparse table.
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}
811
// Forward the cleanup task to the sparse table.
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}
816
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
// The from-card cache keeps one row per such thread (see FromCardCache).
uint HeapRegionRemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
823
// A region's remembered set: the code roots plus the "other regions"
// table, both guarded by a per-region leaf lock named after the region.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}
831
// Derive default sizes for the sparse and fine-grain tables from the
// region size, unless the corresponding flags were set on the command line.
void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  // log2 of the region size in megabytes, clamped at zero for regions of
  // 1M or smaller.
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}
845
// Attempt to claim this remembered set for iteration.  The unsynchronized
// read of _iter_state is only a fast-path filter; the CAS below is the
// authoritative claim.  Returns true for exactly the one caller whose CAS
// observed Unclaimed and installed Claimed.
bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}
851
852void HeapRegionRemSet::set_iter_complete() {
853  _iter_state = Complete;
854}
855
856bool HeapRegionRemSet::iter_is_complete() {
857  return _iter_state == Complete;
858}
859
860#ifndef PRODUCT
// Debug printing: iterate over every card in this remembered set and
// print each card's start address, then cross-check that the number of
// cards yielded matches occupied().  On a mismatch, per-granularity
// (coarse/fine) counts are printed before the guarantee fires.
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    // Translate the global card index back to a heap address via the
    // block-offset shared array.
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT, p2i(card_start));
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  " SIZE_FORMAT_W(6) " occ     (" SIZE_FORMAT_W(6)
                           " coarse, " SIZE_FORMAT_W(6) " fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
882#endif
883
884void HeapRegionRemSet::cleanup() {
885  SparsePRT::cleanup_all();
886}
887
888void HeapRegionRemSet::clear() {
889  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
890  clear_locked();
891}
892
893void HeapRegionRemSet::clear_locked() {
894  _code_roots.clear();
895  _other_regions.clear();
896  assert(occupied_locked() == 0, "Should be clear.");
897  reset_for_par_iteration();
898}
899
900void HeapRegionRemSet::reset_for_par_iteration() {
901  _iter_state = Unclaimed;
902  _iter_claimed = 0;
903  // It's good to check this to make sure that the two methods are in sync.
904  assert(verify_ready_for_par_iteration(), "post-condition");
905}
906
907void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
908                             BitMap* region_bm, BitMap* card_bm) {
909  _other_regions.scrub(ctbs, region_bm, card_bm);
910}
911
// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint, the per-hrrs lock must be held during modifications,
// except when doing a full GC.
// When not at a safepoint, the CodeCache_lock must be held during modifications.
// While concurrent readers access the contains() function
// (during the evacuation phase), no removals are allowed.
920
// Add "nm" to the code root set if not already present.  The unlocked
// contains() check is only an optimistic fast path; the real insert
// happens under the per-rset lock.
// NOTE(review): if a concurrent add races past the unlocked check,
// correctness relies on _code_roots.add() tolerating duplicate inserts
// under the lock -- verify against G1CodeRootSet::add().
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}
929
930void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
931  assert(nm != NULL, "sanity");
932  _code_roots.add(nm);
933}
934
// Remove "nm" from the code root set.  Must be called either with the
// CodeCache_lock held or at a safepoint (asserted below).  If this thread
// already owns CodeCache_lock, that lock provides the required exclusion
// and the per-rset lock is skipped (MutexLockerEx with a NULL mutex is a
// no-op); otherwise the per-rset lock is taken.
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
945
946void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
947  _code_roots.nmethods_do(blk);
948}
949
950void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
951  _code_roots.clean(hr);
952}
953
954size_t HeapRegionRemSet::strong_code_roots_mem_size() {
955  return _code_roots.mem_size();
956}
957
// Construct an iterator over all cards in "hrrs".  Iteration starts in
// the Sparse state and falls through Fine to Coarse (see has_next()).
// The coarse/fine cursors are primed one step before the first element
// because both *_has_next() helpers advance before reading.
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->_bosa),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}
973
// Advance to the next card covered by the coarse (whole-region) map and
// return its global card index through "card_index".  Returns false once
// the coarse map is exhausted.
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      // Found another coarse-marked region: restart the card cursor and
      // recompute the card offset of the region's bottom.
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}
998
// Advance to the next card in the fine-grained (per-region table) data
// and return its global card index through "card_index".  When the
// current PRT's bitmap is exhausted, moves on to the next PRT in the
// "all fine PRTs" list.  Returns false when no fine-grained cards remain.
// NOTE(review): the guarantee below implies every PRT on the list has at
// least one bit set -- an empty PRT would trip it; confirm that invariant.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL if there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}
1020
1021bool HeapRegionRemSetIterator::fine_has_next() {
1022  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
1023}
1024
// Make "prt" the current fine-grained table and recompute the global
// card offset of its region's bottom, resetting the in-PRT card cursor.
void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}
1037
// Yield the next card in the remembered set through "card_index",
// draining the three granularities in order: sparse, then fine, then
// coarse.  The switch intentionally falls through from one case to the
// next when the current granularity is exhausted, advancing _is so
// subsequent calls resume in the right state.  Returns false when all
// three sources are exhausted.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    // Prime the fine iteration with the head of the all-fine-PRTs list,
    // if there is one.
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  // In the single-threaded case, a full drain must account for every
  // occupied card.
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}
1073
1074
1075
// Debugging support for record()/record_event(): lazily-allocated
// parallel arrays logging added cards (oop, card address, region) and
// coarse-grained events keyed by their position in that log.
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
int                 HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int*         HeapRegionRemSet::_recorded_event_index = NULL;
int          HeapRegionRemSet::_n_recorded_events = 0;
1084
// Record (for debugging) that reference location "f" in region "hr" was
// added to a remembered set.  The log arrays are allocated lazily on
// first use and capped at MaxRecorded entries; overflow is reported and
// further records are dropped.
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    // Store the card-aligned address of "f" alongside the raw reference
    // and its region.
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}
1106
// Record (for debugging) a coarse-grained event, remembering the current
// card-log position so print_recorded() can interleave events with the
// cards logged around them.  No-op unless G1RecordHRRSEvents is set.
// Arrays are allocated lazily and capped at MaxRecordedEvents.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    // Position of this event within the card log (_n_recorded).
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}
1125
1126void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
1127  switch (evnt) {
1128  case Event_EvacStart:
1129    str->print("Evac Start");
1130    break;
1131  case Event_EvacEnd:
1132    str->print("Evac End");
1133    break;
1134  case Event_RSUpdateEnd:
1135    str->print("RS Update End");
1136    break;
1137  }
1138}
1139
1140void HeapRegionRemSet::print_recorded() {
1141  int cur_evnt = 0;
1142  Event cur_evnt_kind = Event_illegal;
1143  int cur_evnt_ind = 0;
1144  if (_n_recorded_events > 0) {
1145    cur_evnt_kind = _recorded_events[cur_evnt];
1146    cur_evnt_ind = _recorded_event_index[cur_evnt];
1147  }
1148
1149  for (int i = 0; i < _n_recorded; i++) {
1150    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
1151      gclog_or_tty->print("Event: ");
1152      print_event(gclog_or_tty, cur_evnt_kind);
1153      gclog_or_tty->cr();
1154      cur_evnt++;
1155      if (cur_evnt < MaxRecordedEvents) {
1156        cur_evnt_kind = _recorded_events[cur_evnt];
1157        cur_evnt_ind = _recorded_event_index[cur_evnt];
1158      }
1159    }
1160    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
1161                        " for ref " PTR_FORMAT ".\n",
1162                        p2i(_recorded_cards[i]), p2i(_recorded_regions[i]->bottom()),
1163                        p2i(_recorded_oops[i]));
1164  }
1165}
1166
1167void HeapRegionRemSet::reset_for_cleanup_tasks() {
1168  SparsePRT::reset_for_cleanup_tasks();
1169}
1170
1171void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
1172  _other_regions.do_cleanup_work(hrrs_cleanup_task);
1173}
1174
1175void
1176HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
1177  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
1178}
1179
1180#ifndef PRODUCT
// Self-test: check that fl_mem_size() (the accounting figure used for
// free-list memory) agrees with the actual memory size of a PerRegionTable.
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  // Lower bound: a link pointer plus the card-bitmap payload.
  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         "PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
         "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size);
  // NOTE(review): free() here appears to push "dummy" onto the PRT free
  // list rather than deallocating it (hence the _free_list reset below),
  // so the subsequent uses of "dummy" look intentional -- verify against
  // PerRegionTable::free().
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}
1194
1195void HeapRegionRemSet::test_prt() {
1196  PerRegionTable::test_fl_mem_size();
1197}
1198
// Self-test for the remembered set data structures: populate region 0's
// rset with references from several regions, force a coarsening, then
// iterate and check the yielded card count.  Relies on a live, suitably
// configured heap (see the flag note below), so it only runs on demand.
void HeapRegionRemSet::test() {
  // Give other startup activity time to settle before poking at the heap.
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  // First, middle and last addressable words of three source regions.
  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  // NOTE(review): the expected count appears to be 11 adds minus the 3
  // coarsened away, plus a full region's worth of coarse cards (2048) --
  // confirm 2048 matches CardsPerRegion for the tested configuration.
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
1256#endif
1257