collectedHeap.cpp revision 9727:f944761a3ce3
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}
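
// A worked example of the bound computed in the constructor above (a sketch;
// the exact value depends on platform word size and array header layout, and
// the numbers below assume a 64-bit VM where HeapWordSize == 8 and
// sizeof(jint) == 4, so elements_per_word == 2):
//
//   max_len                 ~ 2^31 - 1 ints (max_array_length(T_INT))
//   max_len / 2             ~ 2^30 heap words of payload
//   + filler_array_hdr_size(), rounded up by align_object_size()
//                           -> _filler_array_max_size
//
// i.e. the largest filler array, in words, that fill_with_array() below can
// ever be asked to create in one piece.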

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
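
// A sketch of a typical caller (hypothetical names; the real callers are the
// VM operations in gc/shared/vmGCOperations.cpp): both preconditions asserted
// above are established by running inside a VM operation whose prologue takes
// the Heap_lock, e.g.
//
//   void VM_MyInspection::doit() {   // hypothetical VM operation
//     // Heap_lock was acquired in the operation's doit_prologue(), and
//     // doit() runs on the VM thread.
//     Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
//   }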

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                          && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
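
// A worked example of the retain-vs-refill decision above (illustrative
// numbers): with a 1 MiB TLAB and the default TLABRefillWasteFraction of 64,
// refill_waste_limit() starts near 1 MiB / 64 = 16 KiB. A slow-path
// allocation finding, say, 100 KiB still free keeps the TLAB and lets the
// object go to the shared heap (returning NULL above); one finding only
// 4 KiB free retires the TLAB and requests a fresh one sized by
// compute_size(). Each record_slow_allocation() call also raises the waste
// limit a little (by TLABWasteIncrement words), so a TLAB that repeatedly
// misses is eventually retired anyway.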

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the obvious way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}
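
// The divide-before-multiply above, in numbers (assuming a 64-bit VM with
// HeapWordSize == 8; a sketch of the arithmetic, not additional policy):
//
//   exact bound:    hdr + (4 * 2147483647) / 8  =  hdr + 1073741823 words
//   computed here:  hdr + 4 * (2147483647 / 8)  =  hdr + 4 * 268435455
//                                               =  hdr + 1073741820 words
//
// The computed bound is 3 words shy of the exact one -- the "little" the
// comment says we lose -- which only makes the cap slightly conservative.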

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
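
// The protocol above, condensed (a sketch of the contract with the compiled
// fast path, using hypothetical names rather than code from this file):
//
//   obj = slow_path_allocate(...);                   // hypothetical
//   obj = heap->new_store_pre_barrier(thread, obj);  // flushes any older
//                                                    // deferral, then defers
//                                                    // or card-marks obj
//   ... initializing stores to obj, with card-marks elided ...
//   // the deferred card-mark completes later, via the next slow-path
//   // allocation on this thread or ensure_parsability() before the next GC
//
// Since every deferral is flushed before a new one is recorded, at most one
// deferred MemRegion per thread is outstanding at any time.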

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}
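
// A worked example of the splitting loop above (illustrative word counts):
// suppose max == 1000 and min == 2. For words == 1001, carving off a full
// max-sized array would strand a 1-word remainder -- too small to fill --
// so cur becomes max - min == 998, leaving a fillable 3-word tail for
// fill_with_object_impl(). For words == 1002 or more, a full max-sized
// chunk is taken, since the remainder stays >= min.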

void CollectedHeap::post_initialize() {
  collector_policy()->post_heap_initialize();
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, const char* when) {
  if (HeapDumpBeforeFullGC || HeapDumpAfterFullGC) {
    GCIdMarkAndRestore gc_id_mark;
    FormatBuffer<> title("Heap Dump (%s full gc)", when);
    GCTraceTime(Info, gc) tm(title.buffer(), timer);
    HeapDumper::dump_heap();
  }
  LogHandle(gc, classhisto) log;
  if (log.is_trace()) {
    ResourceMark rm;
    GCIdMarkAndRestore gc_id_mark;
    FormatBuffer<> title("Class Histogram (%s full gc)", when);
    GCTraceTime(Trace, gc, classhisto) tm(title.buffer(), timer);
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}
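
// How the dumps above are triggered (standard product flags and a unified
// logging selector; shown as an illustrative command line):
//
//   java -XX:+HeapDumpBeforeFullGC -XX:+HeapDumpAfterFullGC \
//        -Xlog:gc+classhisto=trace ...
//
// The first two flags produce HPROF dumps around each full GC; the -Xlog
// selector enables the trace-level (gc, classhisto) tag set that gates the
// class histogram block above.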

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, "before");
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, "after");
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         "before_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         "after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap));
}
#endif
634