/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

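// A PerRegionTable (PRT) records, as a bitmap with one bit per card, which
// cards of a single "from" region may contain references into the region
// owning the remembered set. PRTs are recycled through a global lock-free
// free list and hang off OtherRegionsTable's _fine_grain_regions hash table.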
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  CHeapBitMap     _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* volatile _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _bm(HeapRegion::CardsPerRegion, mtGC),
    _occupied(0),
    _next(NULL), _prev(NULL),
    _collision_list_next(NULL)
  {}

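  // Set the bit for from_card, bumping _occupied on a 0->1 transition.
  // With par == true the bitmap update and the counter increment use
  // atomic operations, so concurrent refinement threads may add cards to
  // the same PRT simultaneously; the unlocked _bm.at() read is only a
  // fast-path filter.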
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
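      // Convert the in-region word offset to a card index: each card spans
      // 2^card_shift bytes, i.e. 2^(card_shift - LogHeapWordSize) words.
      // For example, with the usual 512-byte cards on a 64-bit VM
      // (card_shift == 9, LogHeapWordSize == 3), a "from" pointer 3 KB into
      // the region gives hw_offset == 384 and from_card == 384 >> 6 == 6.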
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          (hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize));

      assert((size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before
    // publishing this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(G1CardLiveData* live_data) {
    live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size).
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

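  // The global _free_list is used as a lock-free LIFO stack: bulk_free()
  // pushes a pre-linked chain onto the head with a single CAS, and alloc()
  // pops the head with a CAS, falling back to heap allocation when the
  // list is empty.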
  // Bulk-free the PRTs from prt to last, assuming that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and modification routines for the pointer of the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collision list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* volatile PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

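// An OtherRegionsTable tracks references into its region at three
// granularities: a sparse table of card indices per from-region, a hash
// table of fine-grain PRT bitmaps (capped at _max_fine_entries), and a
// coarse bitmap with one bit per from-region. Entries are promoted from
// sparse to fine when a sparse entry overflows, and from fine to coarse
// when a PRT is evicted in delete_region_table().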
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(), mtGC),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
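    // For example, with G1RSetRegionEntries == 256: max_entries_log == 8,
    // so there are 256 fine-grain buckets, eviction samples 8 of them,
    // and the sampling stride is 256 / 8 == 32.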
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always prepend to the head of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

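// Record that the object at "from" references this table's region. Fast
// path: the per-worker from-card cache filters duplicate cards. Slow path:
// check the coarse map, then look up (or create) the from-region's PRT,
// trying the sparse table first and promoting its cards into a fresh PRT
// when it overflows.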
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert((size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          prt->add_card(c);
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

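// Evict the fine-grain table with the highest occupancy and coarsen its
// region: sample _fine_eviction_sample_size hash buckets (a stride apart,
// starting at a rotating index), pick the most occupied PRT seen, set the
// region's bit in the coarse map, and unlink the PRT for reuse.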
PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

void OtherRegionsTable::scrub(G1CardLiveData* live_data) {
  // First eliminate garbage regions from the coarse map.
  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());

  log_develop_trace(gc, remset, scrub)("   Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
  if (_n_coarse_entries > 0) {
    live_data->remove_nonlive_regions(&_coarse_map);
    _n_coarse_entries = _coarse_map.count_one_bits();
  }
  log_develop_trace(gc, remset, scrub)("   after = " SIZE_FORMAT ".", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      log_develop_trace(gc, remset, scrub)("     For other region %u:", cur->hr()->hrm_index());
      if (!live_data->is_region_live(cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        log_develop_trace(gc, remset, scrub)("          deleted via region map.");
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        log_develop_trace(gc, remset, scrub)("          occ: before = %4d.", cur->occupied());
        cur->scrub(live_data);
        log_develop_trace(gc, remset, scrub)("          after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current users of this method only pass limits of at most
    // G1RSetSparseRegionEntries. The general alternative, comparing the
    // limit against occupied(), would be too slow at this point.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // All PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void OtherRegionsTable::clear() {
  // If there are no entries, skip this step.
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  if (_n_coarse_entries > 0) {
    _coarse_map.clear();
  }
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert((size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(),
    _other_regions(hr, &_m) {
}

void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain table sizes.
  // table_size = base * (log(region_size / 1M) + 1)
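  // For example, with 8M regions LogOfHRGrainBytes == 23, so
  // region_size_log_mb == 3 and each default below is 4x its base value.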
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
}

void HeapRegionRemSet::scrub(G1CardLiveData* live_data) {
  _other_regions.scrub(live_data);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint, the per-hrrs lock must be held during modifications,
// except when doing a full GC.
// When not at a safepoint, the CodeCache_lock must be held during
// modifications.
// While concurrent readers access the contains() function (during the
// evacuation phase), no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         "should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
         BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion - 1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and to not miss the first
  // bitmap entry, initialize _cur_card_in_prt with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

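// Yield the index of the next card in the remembered set, visiting the
// sparse table first, then every fine-grain PRT, then the coarse map.
// The switch cases deliberately fall through as each component is
// exhausted.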
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  return false;
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1RSetRegionEntries=4", so that regions 1 and 5 hash to
  // the same fine-grain bucket (the index mask is then 0x3).
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Add three references into region 0's remembered set from each of
  // regions 1, 5 and 6.
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

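  // With only four fine-grain buckets (see the flag comment above), the
  // reference from region 7 fills the fine table and the one from region 8
  // forces an eviction: the most occupied PRT (3 cards) is coarsened, so
  // iteration should yield 11 - 3 + 2048 cards (CardsPerRegion == 2048
  // for 1M regions).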
  // Now, does iteration yield all of these cards?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
    tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif