// psMarkSweep.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full GC, or perform any other specialized
// behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

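  // Optionally scavenge first: emptying eden and the survivor spaces of
  // short-lived objects should leave less live data for the (slower) full
  // collection to move.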
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

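  // Forcing the count to 1 makes this a maximally compacting collection.
  // UIntFlagSetting is an RAII helper, so the previous value of
  // MarkSweepAlwaysCompactCount is restored when it goes out of scope.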
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

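  // If a thread is inside a JNI critical region, back out; GCLocker will
  // request another collection once the last critical region is exited.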
  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

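  // The collection proper. The HandleMark discards handles created during the
  // pause, and the GCTraceCPUTime/GCTraceTime instances time and log it.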
  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

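    // Clear the compilers' derived-pointer bookkeeping. Entries are recorded
    // again while roots are processed and are rewritten by
    // DerivedPointerTable::update_pointers() once pointers have been adjusted.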
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

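    // Allow the reference processor to discover reference objects during
    // marking, and select the soft reference clearing policy for this cycle.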
    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

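    // After compaction the card table no longer matches object locations. If
    // the young gen is completely empty there can be no old-to-young pointers,
    // so the old gen cards can simply be cleared; otherwise all of them are
    // dirtied so the next scavenge re-examines the old gen.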
    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

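    // All base pointers have been adjusted, so recompute the derived pointers
    // that were recorded before the objects moved.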
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }


    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, so recalculate the metaspace capacity.
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

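  // The unused part of to_space (from top() to end()) doubles as scratch
  // storage for preserved marks; if it fills up, MarkSweep overflows into the
  // growable _preserved_mark_stack and _preserved_oop_stack.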
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
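  // clear(true) frees each stack's cached segments as well as its contents;
  // the marking stack's plain clear() keeps its cache for the next GC cycle.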
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), _gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in GenCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}