/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _young_gen = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning of object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};
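
// A worked illustration of the check above (a sketch; the 512-byte default
// card size is an assumption, not stated in this file): an object with its
// header at 0x1000 and an old-to-young field at 0x1210 spans two cards. A
// precise mark sits on the field's card (the one covering 0x1210); an
// imprecise mark sits on the header's card (covering 0x1000). do_object()
// therefore fails only when the field's card is unmarked and the object
// head's card is unmarked as well.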

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work.  If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The slice width, ssize * stripe_total, must be consistent with the
  // number of stripes so that the complete slice is covered.
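  //
  // A worked example (a sketch; the 512-byte default card size is an
  // assumption): ssize = 128 cards covers 128 * 512 bytes = 64k of heap,
  // the work unit named above. With stripe_total = 4, slice_width is
  // 512 cards; within each slice, worker k takes cards
  //   [slice + k*128, slice + (k+1)*128)
  // and then advances by slice_width to its stripe of the next slice, so
  // the workers tile the card range without gaps or overlap.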
  size_t slice_width = ssize * stripe_total;
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
        "current_card should only be incremented if it still equals "
        "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated.  If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}
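
// For reference: each GC worker runs the method above on one stripe. The
// call site in this codebase is OldToYoungRootsTask::do_it() in psTasks.cpp,
// roughly sketched (names abbreviated) as:
//
//   CardTableExtension* card_table =
//     barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
//   card_table->scavenge_contents_parallel(old_gen->start_array(),
//                                          old_gen->object_space(),
//                                          old_gen_top,
//                                          pm,  // this worker's PSPromotionManager
//                                          stripe_number,
//                                          stripe_total);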

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}
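
// Orientation note (a summary inferred from the predicates below, not an
// exhaustive list of card values): a card here is clean (nothing
// interesting), dirty (a mutator stored a pointer somewhere on it),
// newgen/youngergen (known to hold an old-to-young pointer), or verify (a
// temporary state used only by the verification pass above). The imprecise
// predicate accepts dirty or newgen; the precise one accepts newgen or
// verify.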

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes.  This allows
// identification of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions grow or shrink at the high end.  The method
// resize_covered_region_by_end() is analogous but handles regions that
// grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region.  The start stays fixed, so the region is
      // growing or shrinking at the high end.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
        "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
    "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}
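
// Dispatch example (a sketch with made-up addresses): if _covered[i] is
// [0x10000, 0x30000) and new_region is [0x10000, 0x40000), the starts
// match, so resize_covered_region_by_start() handles the change at the
// high end. If instead new_region is [0x08000, 0x30000), the ends match,
// so resize_covered_region_by_end() handles the change at the low end.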

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
    "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  int ind = changed_region;
  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
                ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
    "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start), but the assertion-checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
        MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(cur_committed.start() ==
    (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                              os::vm_page_size()),
    "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table.  That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.  Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
    "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                             os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
    _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries.  Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}
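
// Worked example (a sketch with made-up addresses; 512-byte cards
// assumed): if the covered region originally started at 0x20000 and
// new_region now starts at 0x10000, the loop above writes clean_card into
// the entries from byte_for(0x10000) up to (exclusive) byte_for(0x20000),
// so the newly covered low portion starts out clean.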

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder the regions.  At most one should be out of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
      "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
      "Committed regions out of order");
  }
#endif
}

// Returns the lowest start of any committed region that is below the
// target committed region (index ind) and that intersects the target
// region.  If there is none, returns the start of the target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      | target   |
//                      ------------
//                               -------------
//                               |           |
//                               -------------
//                      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}