// cardTableModRefBS.cpp revision 8413:92457dfb91bd
/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

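// Illustrative sketch of the mapping this barrier set maintains (values
// assumed here for concreteness, e.g. 512-byte cards and card_shift == 9,
// not taken from this file): a store to any address within one card-sized
// window of the heap dirties the same card entry,
//
//   jbyte* entry = byte_for(addr);  // byte_map_base + (uintptr_t(addr) >> card_shift)
//   *entry = dirty_card;
//
// so a remembered-set scan only has to revisit card-sized windows whose
// entry is not clean, instead of every reference field in the heap.
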
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
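
// Worked example (illustrative numbers only, assuming 512-byte cards and 4K
// pages): a 512M heap needs 512M / 512 = 1M card entries plus the guard
// card, so _guard_index + 1 == 1M + 1 bytes, which align_size_up() rounds
// to 1M + 4K bytes.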

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
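  // Note (illustrative): byte_map_base is a biased pointer and may itself
  // point outside the committed byte map; it is only ever dereferenced after
  // adding (uintptr_t(addr) >> card_shift) for an addr inside the covered
  // heap, at which point the sum lands back inside
  // [_byte_map, _byte_map + _byte_map_size).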

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL) {
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  }
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
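
// Illustrative example (addresses assumed for the sketch): if "self" asked
// about [0x1000, 0x3000) and a neighboring region's committed range is
// [0x2000, 0x4000), the result is [0x1000, 0x2000) -- the only part of mr
// that no other region (and not the guard page) also relies on, and hence
// the only part that is safe to uncommit.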

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
          "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
      "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
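
// Sketch of the inlined fast path (for illustration only; the real
// definition lives in cardTableModRefBS.inline.hpp):
//
//   jbyte* byte = byte_for(field);
//   if (release) {
//     OrderAccess::release_store((volatile jbyte*) byte, dirty_card);  // ordered after the oop store
//   } else {
//     *byte = dirty_card;
//   }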


void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead of
    // workers()->active_workers():  n_par_threads can be set to 0 to
    // turn off parallelism, for example when this code is called as
    // part of verification during root processing.  active_workers is
    // not overloaded as a switch to disable parallelism; it always
    // means the number of active GC workers.  If parallelism has not
    // been shut off by setting n_par_threads to 0, then n_par_threads
    // should equal active_workers.  If a different mechanism for
    // shutting off parallelism is ever introduced, active_workers can
    // be used in place of n_par_threads.
    int n_threads = GenCollectedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(GenCollectedHeap::heap()->n_par_threads() ==
             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}
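// The loop above writes the same bytes a memset would; dirty() below takes
// that shortcut. An equivalent form (sketch):
//
//   memset(byte_for(mr.start()), dirty_card,
//          pointer_delta(byte_after(mr.last()), byte_for(mr.start()), sizeof(jbyte)));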

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
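  // Illustrative example (numbers assumed): if mr.start() falls, say, 8 words
  // into a card, then mr.start() - 1 still lies in that same card, so
  // byte_after(mr.start() - 1) yields the entry for the *next* card and the
  // partially covered first card is conservatively left unchanged.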
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
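
// Usage sketch (hypothetical closure, for illustration only; not part of
// this file):
//
//   class CountDirtyRangesClosure : public MemRegionClosure {
//     size_t _ranges;
//    public:
//     CountDirtyRangesClosure() : _ranges(0) {}
//     void do_MemRegion(MemRegion mr) { _ranges++; }
//     size_t ranges() const { return _ranges; }
//   };
//
//   CountDirtyRangesClosure counter;
//   bs->dirty_card_iterate(region, &counter);  // counts maximal dirty runs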

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
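
// Worked example (typical values, assumed): with 512-byte cards and 4K
// pages this is 512 * 4096 = 2M. Aligning heap boundaries to this value
// keeps each region's slice of the card table page-aligned, so whole card
// table pages can be committed and uncommitted as regions resize.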

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}
655