/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _young_gen = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning-of-object marks. We therefore
  // test for missing precise marks first; if any are found, we fail only
  // if the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work.  If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
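  // With the default 512-byte card size, 128 cards cover
  // 128 * 512 bytes = 64 KB of heap per work unit.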
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // Each slice is ssize * stripe_total cards wide, so that the
  // stripe_total stripes of ssize cards each exactly tile a slice
  // and the complete slice is covered.
  size_t slice_width = ssize * stripe_total;
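  // Example: with ssize = 128 and stripe_total = 8, a slice spans 1024 cards
  // (512 KB of heap); the worker with stripe_number k scans cards
  // [slice + k * 128, slice + (k + 1) * 128) of every slice.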
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. To accomplish
    // this, we claim that any object with its head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay the first two workers so that they proceed after the
      // rest of the work has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the slice, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }
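    // For example, when an object straddles slice_start:
    //
    //              slice_start
    //                   v
    //   ...[ object A ][ object B ]...
    //
    // object A's head lies in the previous slice, so it belongs to the
    // previous worker; we skip past it and begin at object B instead.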

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
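          //
          // Example (D = dirty, C = clean), one large object covering all
          // four cards:
          //
          //   [D][D][C][D]
          //          ^ the scan stops at this clean card, but because the
          //            object extends past it, current_card is bumped to the
          //            object's ending card and the run continues.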
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // The object spans past the next clean card, so bump
            // current_card to ending_card_of_last_object.
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC-dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // the newgen cards set by GC may be a different set than the
          // originally dirty set.
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // We know which cards to scan, now clear them.
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }
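        // (The stripe's boundary cards are deliberately left uncleared;
        //  objects can straddle stripe boundaries, so workers on adjacent
        //  stripes may still depend on those marks.)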

        const int interval = PrefetchScanIntervalInBytes;
        // Scan all objects in the range.
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // current_card is >= worker_end_card, so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
        "current_card should only be incremented if it still equals "
        "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated.  If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}
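
// For reference, each GC worker typically reaches the method above once per
// stripe from OldToYoungRootsTask::do_it() in psTasks.cpp, roughly as
// sketched here (names abbreviated):
//
//   PSPromotionManager* pm =
//     PSPromotionManager::gc_thread_promotion_manager(which);
//   CardTableExtension* card_table =
//     barrier_set_cast<CardTableExtension>(heap->barrier_set());
//   card_table->scavenge_contents_parallel(old_gen->start_array(),
//                                          old_gen->object_space(),
//                                          old_gen_top,
//                                          pm,
//                                          stripe_number,
//                                          stripe_total);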

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}
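
// Note on the mechanism: CheckForPreciseMarks repaints the card of every
// old-to-young reference it finds via set_card_newgen(), which writes
// verify_card. The helper below then requires every card to be clean_card or
// verify_card (so any surviving youngergen_card would mark a card with no
// young reference) and finally converts the verify_card entries back to
// youngergen_card.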

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes.  This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region.  The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
        "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
    "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}
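
// In short: a matching start means the region is changing at its high end
// (the common case, delegated to CardTableModRefBS); a matching end means it
// is changing at its low end; matching neither means it is a brand-new
// covered region.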

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
    "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  int ind = changed_region;
  log_trace(gc, barrier)("CardTableExtension::resize_covered_region_by_end: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
                ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
    "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
        MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
    "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table.  That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // Uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.  Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
    "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_down(new_start, os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
    _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries.  Only consider the
  // region covered by the card table (_whole_heap).
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}
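
// Example: if the covered region grew downward from [B, E) to [A, E), the
// loop above resets the card entries for [A, B) (clipped to _whole_heap)
// to clean_card.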

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder the regions; there should be at most one out of order.
  for (int i = _cur_covered_regions-1 ; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
      "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
      "Committed regions out of order");
  }
#endif
}


// Returns the lowest start of any committed region that is below
// the target committed region (index ind) and intersects the
// target region.  If none, returns the start of the target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      | target   |
//                      ------------
//                               -------------
//                               |           |
//                               -------------
//                      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}