/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

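// Verifies oops found while walking the roots: any root that points to an
// object considered dead under the given VerifyOption is logged and
// recorded as a failure.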
class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        Log(gc, verify) log;
        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
        if (_vo == VerifyOption_G1UseMarkWord) {
          log.error("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
        }
        ResourceMark rm;
        LogStream ls(log.error());
        obj->print_on(&ls);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

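// Verifies oops embedded in nmethods. Each oop is first passed to the root
// closure above; if G1VerifyHeapRegionCodeRoots is enabled, we additionally
// check that the nmethod is registered in the strong code roots list of the
// region containing the referenced object.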
class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap regions.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseMarkWord) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        log_error(gc, verify)("Code root location " PTR_FORMAT " "
                              "from nmethod " PTR_FORMAT " not in strong "
                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};

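// Adapter that feeds each nmethod found in the code cache to the oop closure
// above, recording which nmethod the visited oops belong to.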
class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};

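// Counts how many of the visited oops point into the young generation.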
class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
 public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; }
};

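// Applies the given oop closure to the oops of each Klass and also checks
// that any Klass referencing young objects is marked as having modified
// oops (i.e. is dirty).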
class VerifyKlassClosure: public KlassClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure *_oop_closure;
 public:
  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);

    _young_ref_counter_closure.reset_count();
    k->oops_do(&_young_ref_counter_closure);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
    }
  }
};

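// Checks that no visited oop references an object that is dead under the
// given VerifyOption.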
class VerifyLivenessOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
    _g1h(g1h), _vo(vo)
  { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "Dead object referenced by a not dead object");
  }
};

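// Verifies each object in a region: a live object must not reference a dead
// one. The sizes of objects allocated before the previous marking are
// accumulated so the caller can cross-check the region's liveness accounting.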
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion *_hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the mark word,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since become dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseMarkWord) {
        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
      }

      o->oop_iterate_no_header(&isLive);
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};

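// Checks that objects in archive regions only reference other archive
// objects; objects in closed archive regions may only reference closed
// archive objects.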
class VerifyArchiveOopClosure: public OopClosure {
  HeapRegion* _hr;
public:
  VerifyArchiveOopClosure(HeapRegion *hr)
    : _hr(hr) { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);

    if (_hr->is_open_archive()) {
      guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    } else {
      assert(_hr->is_closed_archive(), "should be closed archive region");
      guarantee(obj == NULL || G1ArchiveAllocator::is_closed_archive_object(obj),
                "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                p2i(p), p2i(obj));
    }
  }
};

class VerifyObjectInArchiveRegionClosure: public ObjectClosure {
  HeapRegion* _hr;
public:
  VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose)
    : _hr(hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(_hr);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate_no_header(&checkOop);
  }
};

// Should only be used at CDS dump time
class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
private:
  G1CollectedHeap* _g1h;
public:
  VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    if (r->is_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
    }
    return false;
  }
};

void G1HeapVerifier::verify_archive_regions() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  VerifyArchivePointerRegionClosure cl(g1h);
  g1h->heap_region_iterate(&cl);
}

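// Per-region verification: archive regions are checked for references that
// leave the archive, all other regions have their liveness information
// verified.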
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool             _par;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_closed_archive()) {
      VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
      r->object_iterate(&verify_oop_pointers);
      return false;
    } else if (r->is_open_archive()) {
      VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo);
      r->object_iterate(&verify_open_archive_oop);
      return false;
    } else if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    // Returning true would terminate the region iteration early; keep going
    // so that every region is verified. Failures are recorded in _failures.
    return false;
  }
};

// This is the task used for parallel verification of the heap regions

class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap*  _g1h;
  VerifyOption      _vo;
  bool              _failures;
  HeapRegionClaimer _hrclaimer;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
      AbstractGangTask("Parallel verify task"),
      _g1h(g1h),
      _vo(vo),
      _failures(false),
      _hrclaimer(g1h->workers()->active_workers()) {}

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    HandleMark hm;
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
    if (blk.failures()) {
      _failures = true;
    }
  }
};


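// Full heap verification: verifies the roots (including code roots), the
// heap region sets (skipped when verifying via the mark word during a full
// GC), every heap region, and the string deduplication tables if enabled.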
void G1HeapVerifier::verify(VerifyOption vo) {
  if (!SafepointSynchronize::is_at_safepoint()) {
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
    return;
  }

  assert(Thread::current()->is_VM_thread(),
         "Expected to be executed serially by the VM thread at this point");

  log_debug(gc, verify)("Roots");
  VerifyRootsClosure rootsCl(vo);
  VerifyKlassClosure klassCl(_g1h, &rootsCl);
  CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

  // We apply the relevant closures to all the oops in the
  // system dictionary, class loader data graph, the string table
  // and the nmethods in the code cache.
  G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

  {
    G1RootProcessor root_processor(_g1h, 1);
    root_processor.process_all_roots(&rootsCl,
                                     &cldCl,
                                     &blobsCl);
  }

  bool failures = rootsCl.failures() || codeRootsCl.failures();

  if (vo != VerifyOption_G1UseMarkWord) {
    // If we're verifying during a full GC then the region sets
    // will have been torn down at the start of the GC. Therefore
    // verifying the region sets will fail. So we only verify
    // the region sets when not in a full GC.
    log_debug(gc, verify)("HeapRegionSets");
    verify_region_sets();
  }

  log_debug(gc, verify)("HeapRegions");
  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

    G1ParVerifyTask task(_g1h, vo);
    _g1h->workers()->run_task(&task);
    if (task.failures()) {
      failures = true;
    }

  } else {
    VerifyRegionClosure blk(false, vo);
    _g1h->heap_region_iterate(&blk);
    if (blk.failures()) {
      failures = true;
    }
  }

  if (G1StringDedup::is_enabled()) {
    log_debug(gc, verify)("StrDedup");
    G1StringDedup::verify();
  }

  if (failures) {
    log_error(gc, verify)("Heap after failed verification:");
    // It helps to have the per-region information in the output to
    // help us track down what went wrong. This is why we call
    // print_extended_on() instead of print_on().
    Log(gc, verify) log;
    ResourceMark rm;
    LogStream ls(log.error());
    _g1h->print_extended_on(&ls);
  }
  guarantee(!failures, "there should not have been any failures");
}

// Heap region set verification

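// Checks that every region is on the free list or in the set matching its
// type (old or humongous); verify_counts() then compares the accumulated
// per-type counts against the lengths of those sets.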
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet*   _old_set;
  HeapRegionSet*   _humongous_set;
  HeapRegionManager*   _hrm;

public:
  uint _old_count;
  uint _humongous_count;
  uint _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count() { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_young()) {
      // TODO
    } else if (hr->is_humongous()) {
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
  }
};

void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
    // verifying it.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _g1h->_secondary_free_list.verify_list();
  }

  // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
  // one of several (free_list, secondary_free_list, any local lists,
  // etc.). So, if that's the case we will skip the rest of the
  // verification operation. Alternatively, waiting for the concurrent
  // operation to complete will have a non-trivial effect on the GC's
  // operation (no concurrent operation will last longer than the
  // interval between two calls to verification) and it might hide
  // any issues that we would like to catch during testing.
  if (_g1h->free_regions_coming()) {
    return;
  }

  // Make sure we append the secondary_free_list on the free_list so
  // that all free regions we will come across can be safely
  // attributed to the free_list.
  _g1h->append_secondary_free_list_if_not_empty_with_lock();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    _g1h->ensure_parsability(false);
  }
}

double G1HeapVerifier::verify(bool guard, const char* msg) {
  double verify_time_ms = 0.0;

  if (guard && _g1h->total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
    prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}

void G1HeapVerifier::verify_before_gc() {
  double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
  _g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}

void G1HeapVerifier::verify_after_gc() {
  double verify_time_ms = verify(VerifyAfterGC, "After GC");
  _g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}


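// The verification helpers below are only compiled into non-product builds.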
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1HeapVerifier* _verifier;
  G1SATBCardTableModRefBS* _ct_bs;
public:
  G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
    : _verifier(verifier), _ct_bs(ct_bs) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    if (r->is_survivor()) {
      _verifier->verify_dirty_region(r);
    } else {
      _verifier->verify_not_dirty_region(r);
    }
    return false;
  }
};

void G1HeapVerifier::verify_card_table_cleanup() {
  if (G1VerifyCTCleanup || VerifyAfterGC) {
    G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
    _g1h->heap_region_iterate(&cleanup_verifier);
  }
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
  MemRegion mr(hr->bottom(), hr->end());
  ct_bs->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct_bs->verify_g1_young_region(mr);
  } else {
    ct_bs->verify_dirty_region(mr);
  }
}

class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
  G1HeapVerifier* _verifier;
public:
  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    _verifier->verify_dirty_region(r);
    return false;
  }
};

void G1HeapVerifier::verify_dirty_young_regions() {
  G1VerifyDirtyYoungListClosure cl(this);
  _g1h->collection_set()->iterate(&cl);
}

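// Returns false (and logs the offending address) if the given bitmap has any
// bit set in the range [tams, end).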
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
                                              HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
  HeapWord* result = bitmap->get_next_marked_addr(tams, end);
  if (result < end) {
    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
    return false;
  }
  return true;
}

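// Verifies that neither the prev nor the next mark bitmap has bits set above
// the region's corresponding top-at-mark-start. The next bitmap is skipped
// while it is being cleared concurrently.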
bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->nextMarkBitMap();

  HeapWord* ptams  = hr->prev_top_at_mark_start();
  HeapWord* ntams  = hr->next_top_at_mark_start();
  HeapWord* end    = hr->end();

  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);

  bool res_n = true;
  // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
  // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
  // if we happen to be in that state.
  if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
  }
  if (!res_p || !res_n) {
    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    log_error(gc, verify)("#### Caller: %s", caller);
    return false;
  }
  return true;
}

void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
  if (!G1VerifyBitmaps) return;

  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}

class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
  const char* _caller;
  G1HeapVerifier* _verifier;
  bool _failures;

public:
  G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
    _caller(caller), _verifier(verifier), _failures(false) { }

  bool failures() { return _failures; }

  virtual bool doHeapRegion(HeapRegion* hr) {
    bool result = _verifier->verify_bitmaps(_caller, hr);
    if (!result) {
      _failures = true;
    }
    return false;
  }
};

void G1HeapVerifier::check_bitmaps(const char* caller) {
  if (!G1VerifyBitmaps) return;

  G1VerifyBitmapClosure cl(caller, this);
  _g1h->heap_region_iterate(&cl);
  guarantee(!cl.failures(), "bitmap verification");
}

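// Checks that the in-cset fast test table is consistent with each region's
// collection set membership and region type.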
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
 private:
  bool _failures;
 public:
  G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool doHeapRegion(HeapRegion* hr) {
    uint i = hr->hrm_index();
    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
    } else {
      if (cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != cset_state.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                             hr->in_collection_set(), cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        if (hr->is_young() != (cset_state.is_young())) {
          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_young(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                               hr->is_old(), cset_state.value(), i);
          _failures = true;
          return true;
        }
      }
    }
    return false;
  }

  bool failures() const { return _failures; }
};

bool G1HeapVerifier::check_cset_fast_test() {
  G1CheckCSetFastTableClosure cl;
  _g1h->_hrm.iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT
