// psMarkSweep.cpp revision 9149:a8a8604f890f
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

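  // Optionally run a young collection first (ScavengeBeforeFullGC), so that
  // short-lived objects are handled by the cheaper copying collector before
  // the full compaction walks the whole heap.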
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

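  // For a maximally compacting collection, force MarkSweepAlwaysCompactCount
  // down to 1 so every space is compacted fully; the RAII UIntXFlagSetting
  // below restores the previous value when this scope is left.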
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

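  // Bail out if a JNI critical section is active; the GC locker will request
  // this collection again once the last critical thread has left.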
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

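    // If eden still contains live data after the compaction, try to absorb it
    // into the old gen by moving the generation boundary (only possible with
    // UseAdaptiveGCBoundary), so the young gen can be left empty.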
    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

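    // The card table only needs to record old-to-young pointers. If the young
    // gen is now empty there can be none, so the old gen cards can simply be
    // cleared; otherwise compaction has made the existing card marks stale,
    // so dirty the entire old gen region.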
    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT,
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
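  // Preserved marks live in the currently unused part of to_space; if that
  // area fills up, the shared MarkSweep code falls back to the C-heap backed
  // _preserved_mark_stack/_preserved_oop_stack.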
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

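  // Each live object is slid to the forwarding address computed in phase2;
  // the old gen is compacted first, then the young gen spaces.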
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}