collectedHeap.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25#include "precompiled.hpp"
26#include "classfile/systemDictionary.hpp"
27#include "gc/shared/allocTracer.hpp"
28#include "gc/shared/barrierSet.inline.hpp"
29#include "gc/shared/collectedHeap.hpp"
30#include "gc/shared/collectedHeap.inline.hpp"
31#include "gc/shared/gcHeapSummary.hpp"
32#include "gc/shared/gcTrace.hpp"
33#include "gc/shared/gcTraceTime.inline.hpp"
34#include "gc/shared/gcWhen.hpp"
35#include "gc/shared/vmGCOperations.hpp"
36#include "logging/log.hpp"
37#include "memory/metaspace.hpp"
38#include "memory/resourceArea.hpp"
39#include "oops/instanceMirrorKlass.hpp"
40#include "oops/oop.inline.hpp"
41#include "runtime/init.hpp"
42#include "runtime/thread.inline.hpp"
43#include "services/heapDumper.hpp"
44#include "utilities/align.hpp"
45
46
47#ifdef ASSERT
48int CollectedHeap::_fire_out_of_memory_count = 0;
49#endif
50
51size_t CollectedHeap::_filler_array_max_size = 0;
52
53template <>
54void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
55  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
56  st->print_raw(m);
57}
58
59void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
60  if (!should_log()) {
61    return;
62  }
63
64  double timestamp = fetch_timestamp();
65  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
66  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

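// Summarizes the reserved heap as a (low, committed high, reserved high)
// triple; the committed boundary is approximated here as start + capacity.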
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

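// Gathers committed/used/reserved sizes for metaspace as a whole and for the
// non-class and class parts separately, plus the chunk free-list statistics.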
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
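  // For example, on a 64-bit VM (HeapWordSize == 8, sizeof(jint) == 4),
  // elements_per_word == 2, so the cap is roughly the array header plus
  // max_array_length(T_INT) / 2 words: the largest int[] usable as filler.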

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
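    // (These surface as sun.gc.cause and sun.gc.lastCause in the jvmstat
    // counters, visible via e.g. "jcmd <pid> PerfCounter.print".)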
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

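// Slow-path TLAB allocation: called when the current TLAB cannot satisfy the
// request. Either keep the TLAB (and allocate this object directly in the
// shared heap) when retiring it would waste too much free space, or retire
// it and allocate a fresh TLAB.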
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
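
// Illustrative arithmetic for max_tlab_size() on a 64-bit VM (HeapWordSize
// == 8): (juint)max_jint / 8 == 268435455, times sizeof(jint) == 4 gives
// 1073741820 words of payload, slightly less than the max_jint/2 == 1073741823
// words an exact (overflow-prone) computation would give.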

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
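//
// In short (a reading of the cases above, not additional policy): after
// flushing any previously deferred mark, the store barrier for new_obj is
// either elided entirely (elidable object or an array of non-references),
// deferred by recording the region in the thread, or applied immediately
// via BarrierSet::write_region().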
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
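  // (Writing the length before the klass is installed keeps the heap
  // walkable: a concurrent reader that sees the klass pointer will also see
  // a valid length, so it can size and skip the filler object.)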
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  // A range larger than filler_array_max_size may require multiple filler
  // objects. Fill the bulk of the range with filler_array_max_size-sized
  // arrays; the remainder is filled with a single object.
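  // For example (illustrative numbers only): with max == 1000 and min == 2,
  // filling 1001 words takes one array of 998 words (since 1001 - 1000 < min,
  // cur becomes max - min) followed by a single 3-word remainder, keeping
  // every chunk and the remainder at least min words long.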
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must know that mutators aren't going to
  // interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started
  // allocating (nothing much to verify) or we have started
  // allocating but are now a full-fledged JavaThread (and have
  // thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

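// Heap dumps around full GCs are gated by the -XX:+HeapDumpBeforeFullGC and
// -XX:+HeapDumpAfterFullGC flags; the class histogram is emitted when
// gc+classhisto logging is enabled at trace level (e.g. via
// -Xlog:gc+classhisto*=trace).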
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  Log(gc, classhisto) log;
  if (log.is_trace()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

621