/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, to scavenge before a full gc, or to perform any other
// specialized behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

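  // Optionally scavenge first: a young GC is comparatively cheap and
  // typically empties eden, reducing the work the full collection must do.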
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

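  // A maximally compacting collection compacts everything: temporarily set
  // MarkSweepAlwaysCompactCount to 1 for the duration of this call
  // (UIntFlagSetting restores the previous value when this scope exits).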
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
106
107// This method contains no policy. You should probably
108// be calling invoke() instead.
109bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
110  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
111  assert(ref_processor() != NULL, "Sanity");
112
113  if (GCLocker::check_active_before_gc()) {
114    return false;
115  }
116
117  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
118  GCCause::Cause gc_cause = heap->gc_cause();
119
120  GCIdMark gc_id_mark;
121  _gc_timer->register_gc_start();
122  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
123
124  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
125
126  // The scope of casr should end after code that can change
127  // CollectorPolicy::_should_clear_all_soft_refs.
128  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
129
130  PSYoungGen* young_gen = heap->young_gen();
131  PSOldGen* old_gen = heap->old_gen();
132
133  // Increment the invocation count
134  heap->increment_total_collections(true /* full */);
135
136  // Save information needed to minimize mangling
137  heap->record_gen_tops_before_GC();
138
139  // We need to track unique mark sweep invocations as well.
140  _total_invocations++;
141
142  heap->print_heap_before_gc();
143  heap->trace_heap_before_gc(_gc_tracer);
144
145  // Fill in TLABs
146  heap->accumulate_statistics_all_tlabs();
147  heap->ensure_parsability(true);  // retire TLABs
148
149  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
150    HandleMark hm;  // Discard invalid handles created during verification
151    Universe::verify("Before GC");
152  }
153
154  // Verify object start arrays
155  if (VerifyObjectStartArray &&
156      VerifyBeforeGC) {
157    old_gen->verify_object_start_array();
158  }
159
160  // Filled in below to track the state of the young gen after the collection.
161  bool eden_empty;
162  bool survivors_empty;
163  bool young_gen_empty;
164
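  // The collection proper runs inside this block so that the HandleMark,
  // timers and stat trackers created below are destroyed (and report their
  // results) before the post-GC verification code runs.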
  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

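    // Reserve the area in to_space used to preserve object headers (mark
    // words) that get overwritten during the collection.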
    allocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

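    // The collection itself: phase 1 marks live objects, phase 2 computes
    // new object addresses, phase 3 adjusts pointers, and phase 4 moves
    // objects to their new locations.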
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

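    // Objects are now in their final locations; restore the object headers
    // that were saved off to make room for mark/forwarding data.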
    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

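    // If eden is not empty, try to absorb its live data directly into the
    // old gen by moving the young/old boundary; see
    // absorb_live_data_from_eden() below.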
    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

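    // With a completely empty young gen there can be no old-to-young
    // pointers, so the card table covering the old gen can simply be
    // cleared; otherwise all of its cards must be dirtied, because objects
    // have moved and the existing card marks are stale.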
    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

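    // Hand the discovered references over to the pending list so that
    // Java-level reference handling can process them after the pause.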
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());

    ref_processor()->enqueue_discovered_references(NULL, &pt);

    pt.print_enqueue_phase();

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

331
332    if (UsePerfData) {
333      heap->gc_policy_counters()->update_counters();
334      heap->gc_policy_counters()->update_old_capacity(
335        old_gen->capacity_in_bytes());
336      heap->gc_policy_counters()->update_young_capacity(
337        young_gen->capacity_in_bytes());
338    }
339
340    heap->resize_all_tlabs();
341
342    // We collected the heap, recalculate the metaspace capacity
343    MetaspaceGC::compute_new_size();
344
345    if (TraceOldGenTime) accumulated_time()->stop();
346
347    young_gen->print_used_change(young_gen_prev_used);
348    old_gen->print_used_change(old_gen_prev_used);
349    MetaspaceAux::print_metaspace_change(metadata_prev_used);
350
351    // Track memory usage and detect low memory
352    MemoryService::track_memory_usage();
353    heap->update_counters();
354
355    heap->post_full_gc_dump(_gc_timer);
356  }
357
358  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
359    HandleMark hm;  // Discard invalid handles created during verification
360    Universe::verify("After GC");
361  }
362
363  // Re-verify object start arrays
364  if (VerifyObjectStartArray &&
365      VerifyAfterGC) {
366    old_gen->verify_object_start_array();
367  }
368
369  if (ZapUnusedHeapArea) {
370    old_gen->object_space()->check_mangled_unused_area_complete();
371  }
372
373  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
374
375  heap->print_heap_after_gc();
376  heap->trace_heap_after_gc(_gc_tracer);
377
378#ifdef TRACESPINNING
379  ParallelTaskTerminator::print_termination_counts();
380#endif
381
382  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
383
384  _gc_timer->register_gc_end();
385
386  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
387
388  return true;
389}
390
391bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
392                                             PSYoungGen* young_gen,
393                                             PSOldGen* old_gen) {
394  MutableSpace* const eden_space = young_gen->eden_space();
395  assert(!eden_space->is_empty(), "eden must be non-empty");
396  assert(young_gen->virtual_space()->alignment() ==
397         old_gen->virtual_space()->alignment(), "alignments do not match");
398
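  // Moving the young/old boundary is only possible when adaptive sizing and
  // the adaptive GC boundary are both enabled.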
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
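  // For example (sizes are illustrative only): with 1 MiB free in to_space
  // and sizeof(PreservedMark) == 16, there is room for 1048576 / 16 = 65536
  // preserved marks before the overflow stacks must be used.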
}


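// Release the marking and preservation stacks; clear(true) frees the
// stacks' cached backing segments as well as their contents.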
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), _gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}