heapRegionRemSet.cpp revision 11857:d0fbf661cc16
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

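// A PerRegionTable records, for a single source region, the set of cards in
// that region that may contain pointers into the region owning this remembered
// set. It is a bitmap over the source region's cards plus an occupancy count,
// and is the "fine-grained" representation used by OtherRegionsTable.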
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  CHeapBitMap     _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

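  // Mark the given card as containing a pointer into this table's region.
  // In the parallel case the bit and the occupancy counter are updated with
  // atomic operations so that concurrent refinement threads can add cards
  // to the same table safely.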
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation.  This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this.
    if (loc_hr->is_in_reserved(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert((size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(G1CardLiveData* live_data) {
    live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assuming that they are
  // linked together using their _next field.
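  // The PRTs are pushed onto the global free list with a CAS loop, so
  // multiple threads may free tables concurrently without taking a lock.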
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
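  // First tries to pop a table from the lock-free global free list and
  // re-initialize it for the given region; if the free list is empty, a new
  // table is allocated from the C heap.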
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

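// An OtherRegionsTable tracks, for one heap region, the cards in other regions
// that may contain pointers into it. Entries are kept at three granularities:
// a sparse per-region table of card indices, fine-grained PerRegionTable
// bitmaps kept in the _fine_grain_regions hash table, and a coarse bitmap with
// one bit per source region. When the fine table is full, a victim PRT is
// evicted and its whole source region is coarsened.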
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions()),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always insert at the head of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

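// Add a reference from the card containing "from" to this table's region,
// using the least expensive representation that can hold it: first consult
// the per-thread from-card cache, then the coarse map, then an existing
// fine-grained PRT, and finally the sparse table. If the sparse table is
// full for the source region, its entries are migrated into a fine-grained
// PRT (possibly evicting and coarsening another region's PRT).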
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert((size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          prt->add_card(c);
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

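// Pick a fine-grained PRT to evict when the fine table is full: sample
// _fine_eviction_sample_size hash buckets (starting at a rotating index),
// choose the PRT with the highest occupancy among the sampled chains, mark
// its source region in the coarse map, and unlink it so the caller can reuse
// the table.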
PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

void OtherRegionsTable::scrub(G1CardLiveData* live_data) {
  // First eliminate garbage regions from the coarse map.
  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());

  log_develop_trace(gc, remset, scrub)("   Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
  if (_n_coarse_entries > 0) {
    live_data->remove_nonlive_regions(&_coarse_map);
    _n_coarse_entries = _coarse_map.count_one_bits();
  }
  log_develop_trace(gc, remset, scrub)("   after = " SIZE_FORMAT ".", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      log_develop_trace(gc, remset, scrub)("     For other region %u:", cur->hr()->hrm_index());
      if (!live_data->is_region_live(cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        log_develop_trace(gc, remset, scrub)("          deleted via region map.");
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        log_develop_trace(gc, remset, scrub)("          occ: before = %4d.", cur->occupied());
        cur->scrub(live_data);
        log_develop_trace(gc, remset, scrub)("          after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit. The solution, comparing against occupied(), would be too slow
    // at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable * cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  if (_n_coarse_entries > 0) {
    _coarse_map.clear();
  }
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

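// Lookup mirrors the storage hierarchy used by add_reference(): check the
// coarse map first, then the fine-grained PRT for the source region, and
// finally the sparse table.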
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert((size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(),
    _other_regions(hr, &_m) {
}

void HeapRegionRemSet::setup_remset_size() {
  // Set up the sparse and fine-grain table sizes.
  // table_size = base * (log(region_size / 1M) + 1)
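  // For example, with 8M regions LogOfHRGrainBytes is 23, so
  // region_size_log_mb is 3 and each table gets 4 times its base number
  // of entries.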
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start = _bot->address_for_index(card_index);
    tty->print_cr("  Card " PTR_FORMAT, p2i(card_start));
  }
  if (iter.n_yielded() != occupied()) {
    tty->print_cr("Yielded disagrees with occupied:");
    tty->print_cr("  " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    tty->print_cr("  " SIZE_FORMAT_W(6) " occ     (" SIZE_FORMAT_W(6)
                           " coarse, " SIZE_FORMAT_W(6) " fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
}

void HeapRegionRemSet::scrub(G1CardLiveData* live_data) {
  _other_regions.scrub(live_data);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
          "should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
          BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
          "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
          BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
          BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

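// Iterates over all cards in a remembered set, yielding cards from the
// sparse table first, then from the fine-grained PRTs, and finally from the
// coarse map (where every card of a coarsened region is yielded).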
HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL if there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_card_in_prt with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
    tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif