/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/concurrentGCPhaseManager.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/debug.hpp"

// ======= Concurrent Mark Thread ========

// Check order in EXPAND_CONCURRENT_PHASES
STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
              ConcurrentGCPhaseManager::IDLE_PHASE);

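// X-macro describing the concurrent phases.  Each use of the expander
// receives (tag, value, title): ANY and IDLE pin their values to the phase
// manager's constants, the remaining tags get consecutive enum values, and
// a NULL title marks a phase that has no concurrent timing/logging title.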
#define EXPAND_CONCURRENT_PHASES(expander)                              \
  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)  \
  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)          \
  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                       \
  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")      \
  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")          \
  expander(CONCURRENT_MARK,, "Concurrent Mark")                         \
  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")              \
  expander(BEFORE_REMARK,, NULL)                                        \
  expander(REMARK,, NULL)                                               \
  expander(CREATE_LIVE_DATA,, "Concurrent Create Live Data")            \
  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")            \
  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")  \
  /* */

class G1ConcurrentPhase : public AllStatic {
public:
  enum {
#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
#undef CONCURRENT_PHASE_ENUM
    PHASE_ID_LIMIT
  };
};
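// Illustrative expansion (a sketch, not generated output):
//   enum { ANY  = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE,
//          IDLE = ConcurrentGCPhaseManager::IDLE_PHASE,
//          CONCURRENT_CYCLE, CLEAR_CLAIMED_MARKS, ...,
//          PHASE_ID_LIMIT };
// The tags after IDLE therefore take dense, consecutive values, which the
// name and title tables below assert as the "dense enum assumption".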

// The CM thread is created when the G1 garbage collector is used

ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _state(Idle),
  _phase_manager_stack(),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {

  set_name("G1 Main Marker");
  create_and_start();
}

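// VoidClosures handed to VM_CGC_Operation below; the VM thread invokes
// do_void() inside a safepoint for the Remark and Cleanup pauses.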
class CMCheckpointRootsFinalClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
public:
  CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  G1ConcurrentMark* _cm;
public:
  CMCleanUp(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->cleanup();
  }
};

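// G1's pause scheduling honors an MMU (minimum mutator utilization) goal:
// within any interval of GCPauseIntervalMillis the mutator should get its
// configured share of the time.  when_ms() below returns how long a pause
// of the predicted duration must be deferred to keep that goal.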
// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
  const G1Analytics* analytics = g1_policy->analytics();
  if (g1_policy->adaptive_young_list_length()) {
    double now = os::elapsedTime();
    double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                  : analytics->predict_cleanup_time_ms();
    G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
    os::sleep(this, sleep_time_ms, false);
  }
}

class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
  G1ConcurrentMark* _cm;

 public:
  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
    GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
    _cm(cm)
  {
    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
  }

  ~G1ConcPhaseTimer() {
    _cm->gc_timer_cm()->register_gc_concurrent_end();
  }
};

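// The phase name table: XSTR(tag) stringifies each enum tag.  The array is
// NULL-terminated so it can be scanned without knowing PHASE_ID_LIMIT.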
static const char* const concurrent_phase_names[] = {
#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
#undef CONCURRENT_PHASE_NAME
  NULL                          // terminator
};
// Verify dense enum assumption.  +1 for terminator.
STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
              ARRAY_SIZE(concurrent_phase_names));

// Returns the phase number for name, or a negative value if unknown.
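// For example, lookup_concurrent_phase("MARK_FROM_ROOTS") yields
// G1ConcurrentPhase::MARK_FROM_ROOTS; an unknown name yields -1.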
static int lookup_concurrent_phase(const char* name) {
  const char* const* names = concurrent_phase_names;
  for (uint i = 0; names[i] != NULL; ++i) {
    if (strcmp(name, names[i]) == 0) {
      return static_cast<int>(i);
    }
  }
  return -1;
}

// The phase must be valid and must have a title.
static const char* lookup_concurrent_phase_title(int phase) {
  static const char* const titles[] = {
#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
#undef CONCURRENT_PHASE_TITLE
  };
  // Verify dense enum assumption.
  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));

  assert(0 <= phase, "precondition");
  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
  const char* title = titles[phase];
  assert(title != NULL, "precondition");
  return title;
}

class G1ConcPhaseManager : public StackObj {
  G1ConcurrentMark* _cm;
  ConcurrentGCPhaseManager _manager;

public:
  G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
    _cm(thread->cm()),
    _manager(phase, thread->phase_manager_stack())
  { }

  ~G1ConcPhaseManager() {
    // Deactivate the manager if marking aborted, to avoid blocking on
    // phase exit when the phase has been requested.
    if (_cm->has_aborted()) {
      _manager.deactivate();
    }
  }

  void set_phase(int phase, bool force) {
    _manager.set_phase(phase, force);
  }
};

// Combine phase management and timing into one convenient utility.
class G1ConcPhase : public StackObj {
  G1ConcPhaseTimer _timer;
  G1ConcPhaseManager _manager;

public:
  G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
    _manager(phase, thread)
  { }
};

const char* const* ConcurrentMarkThread::concurrent_phases() const {
  return concurrent_phase_names;
}

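// Entry point for the WhiteBox testing API: block until the marking thread
// reaches the named phase, starting a concurrent cycle (with cause
// _wb_conc_mark) if one is needed for the phase to ever be entered.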
bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
  int phase = lookup_concurrent_phase(phase_name);
  if (phase < 0) return false;

  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
                                                   phase_manager_stack())) {
    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
      // If idle and the goal is !idle, start a collection.
      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
    }
  }
  return true;
}

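// The marking thread's main loop: one iteration per concurrent cycle.  The
// Remark and Cleanup pauses are handed to the VM thread as VM operations;
// everything else runs concurrently with the mutator, joining the
// SuspendibleThreadSet only where noted below.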
void ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1_policy = g1h->g1_policy();

  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);

  while (!should_terminate()) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (should_terminate()) {
      break;
    }

    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);

    GCIdMark gc_id_mark;

    cm()->concurrent_cycle_start();

    assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");

    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
    {
      ResourceMark rm;
      HandleMark   hm;
      double cycle_start = os::elapsedVTime();

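      // Reset the claim bits on all ClassLoaderData so that marking can
      // claim and process each class loader exactly once this cycle.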
      {
        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
        ClassLoaderDataGraph::clear_claimed_marks();
      }

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      {
        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
        _cm->scan_root_regions();
      }

      // It would be nice to use the G1ConcPhase class here but
      // the "end" logging is inside the loop and not at the end of
      // a scope. Also, the timer doesn't support nesting.
      // Mimicking the same log output instead.
      {
        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
        jlong mark_start = os::elapsed_counter();
        const char* cm_title =
          lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
        log_info(gc, marking)("%s (%.3fs)",
                              cm_title,
                              TimeHelper::counter_to_seconds(mark_start));
        for (uint iter = 1; !cm()->has_aborted(); ++iter) {
          // Concurrent marking.
          {
            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
            _cm->mark_from_roots();
          }
          if (cm()->has_aborted()) break;

          // Provide a control point after mark_from_roots.
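          // The scope below is intentionally empty: pushing and popping the
          // BEFORE_REMARK phase is itself the synchronization point for
          // request_concurrent_phase() waiters.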
          {
            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
          }
          if (cm()->has_aborted()) break;

          // Delay remark pause for MMU.
          double mark_end_time = os::elapsedVTime();
          jlong mark_end = os::elapsed_counter();
          _vtime_mark_accum += (mark_end_time - cycle_start);
          delay_to_keep_mmu(g1_policy, true /* remark */);
          if (cm()->has_aborted()) break;

          // Pause Remark.
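          // Remark is a stop-the-world pause: the closure is handed to the
          // VM thread, which runs it at a safepoint to finalize marking.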
          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
                                cm_title,
                                TimeHelper::counter_to_seconds(mark_start),
                                TimeHelper::counter_to_seconds(mark_end),
                                TimeHelper::counter_to_millis(mark_end - mark_start));
          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "Pause Remark");
          VMThread::execute(&op);
          if (cm()->has_aborted()) {
            break;
          } else if (!cm()->restart_for_overflow()) {
            break;              // Exit loop if no restart requested.
          } else {
            // Loop to restart for overflow.
            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
                                  cm_title, iter);
          }
        }
      }

      if (!cm()->has_aborted()) {
        G1ConcPhase p(G1ConcurrentPhase::CREATE_LIVE_DATA, this);
        cm()->create_live_data();
      }

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking.  We purposely
      // neglect the presumably-short complete_cleanup() phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        delay_to_keep_mmu(g1_policy, false /* cleanup */);

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        // Now do the concurrent cleanup operation.
        G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
        _cm->complete_cleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts_join;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        } else {
          log_info(gc, marking)("Concurrent Mark Abort");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
        _cm->cleanup_for_next_mark();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts_join;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);

      cm()->concurrent_cycle_end();
    }

    cpmanager.set_phase(G1ConcurrentPhase::IDLE, cm()->has_aborted() /* force */);
  }
  _cm->root_regions()->cancel_scan();
}

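// Wake the marking thread so that a wait in sleepBeforeNextCycle() can
// re-check should_terminate() and exit.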
void ConcurrentMarkThread::stop_service() {
  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
  CGC_lock->notify_all();
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // Wait on CGC_lock until a concurrent cycle is started or termination is
  // requested. As a non-Java thread we must not take part in the safepoint
  // protocol, hence _no_safepoint_check_flag.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !should_terminate()) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
  }
}