/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Sweep Thread ========

ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector         = NULL;
int  ConcurrentMarkSweepThread::_CMS_flag                   = CMS_nil;

volatile jint ConcurrentMarkSweepThread::_pending_yields    = 0;

ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
  : ConcurrentGCThread() {
  assert(UseConcMarkSweepGC,  "UseConcMarkSweepGC should be set");
  assert(_cmst == NULL, "CMS thread already created");
  _cmst = this;
  assert(_collector == NULL, "Collector already set");
  _collector = collector;

  set_name("CMS Main Thread");

  // An old comment here said: "Priority should be just less
  // than that of VMThread".  Since the VMThread runs at
  // NearMaxPriority, the old comment was inaccurate, but
  // changing the default priority to NearMaxPriority-1
  // could change current behavior, so the default of
  // NearMaxPriority stays in place.
  //
  // Note that there's a possibility of the VMThread
  // starving if UseCriticalCMSThreadPriority is on.
  // That won't happen on Solaris for various reasons,
  // but may well happen on non-Solaris platforms.
  create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
}

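// The CMS thread's main service loop: sleep between cycles, then run a
// background collection with the appropriate cause (an explicitly requested
// full GC, or an ordinary concurrent cycle).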
void ConcurrentMarkSweepThread::run_service() {
  assert(this == cmst(), "just checking");

  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
    log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
  }

  while (!should_terminate()) {
    sleepBeforeNextCycle();
    if (should_terminate()) break;
    GCIdMark gc_id_mark;
    GCCause::Cause cause = _collector->_full_gc_requested ?
      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
    _collector->collect_in_background(cause);
  }

  // Check that the state of any protocol for synchronization
  // between background (CMS) and foreground collector is "clean"
  // (i.e. will not potentially block the foreground collector,
  // requiring action by us).
  verify_ok_to_terminate();
}

#ifndef PRODUCT
void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
  assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
           cms_thread_wants_cms_token()),
         "Must renounce all worldly possessions and desires for nirvana");
  _collector->verify_ok_to_terminate();
}
#endif

// Create and start a new ConcurrentMarkSweepThread for the given CMS collector.
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
  guarantee(_cmst == NULL, "start() called twice!");
  ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
  assert(_cmst == th, "Where did the just-created CMS thread go?");
  return th;
}

void ConcurrentMarkSweepThread::stop_service() {
  // Now post a notify on CGC_lock so as to nudge
  // CMS thread(s) that might be slumbering in
  // sleepBeforeNextCycle.
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  CGC_lock->notify_all();
}

void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
  assert(tc != NULL, "Null ThreadClosure");
  if (cmst() != NULL && !cmst()->has_terminated()) {
    tc->do_thread(cmst());
  }
  assert(Universe::is_fully_initialized(),
         "Called too early, make sure heap is fully initialized");
  if (_collector != NULL) {
    AbstractWorkGang* gang = _collector->conc_workers();
    if (gang != NULL) {
      gang->threads_do(tc);
    }
  }
}

void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
  if (cmst() != NULL && !cmst()->has_terminated()) {
    cmst()->print_on(st);
    st->cr();
  }
  if (_collector != NULL) {
    AbstractWorkGang* gang = _collector->conc_workers();
    if (gang != NULL) {
      gang->print_worker_threads_on(st);
    }
  }
}

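// Token-passing protocol between the VM thread and the CMS thread, built on
// CGC_lock and the _CMS_flag bits: at most one of the two threads holds the
// "CMS token" at a time.  A thread that wants the token advertises that in
// _CMS_flag and waits on CGC_lock until the holder releases the token in
// desynchronize() and notifies.  Note that the CMS thread also yields to a
// VM thread that merely wants the token, giving the VM thread priority.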
void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLockerEx x(CGC_lock,
                  Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    CMSSynchronousYieldRequest yr;
    while (CMS_flag_is_set(CMS_cms_has_token)) {
      // indicate that we want to get the token
      set_CMS_flag(CMS_vm_wants_token);
      CGC_lock->wait(true);
    }
    // claim the token and proceed
    clear_CMS_flag(CMS_vm_wants_token);
    set_CMS_flag(CMS_vm_has_token);
  } else {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    // The following barrier assumes there's only one CMS thread.
    // This will need to be modified if there is more than one CMS thread.
    while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
      set_CMS_flag(CMS_cms_wants_token);
      CGC_lock->wait(true);
    }
    // claim the token
    clear_CMS_flag(CMS_cms_wants_token);
    set_CMS_flag(CMS_cms_has_token);
  }
}

void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLockerEx x(CGC_lock,
                  Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
    clear_CMS_flag(CMS_vm_has_token);
    if (CMS_flag_is_set(CMS_cms_wants_token)) {
      // wake up a waiting CMS thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
           "Should have been cleared");
  } else {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
    clear_CMS_flag(CMS_cms_has_token);
    if (CMS_flag_is_set(CMS_vm_wants_token)) {
      // wake up a waiting VM thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
           "Should have been cleared");
  }
}

// Wait until any cms_lock event
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
  MutexLockerEx x(CGC_lock,
                  Mutex::_no_safepoint_check_flag);
  if (should_terminate() || _collector->_full_gc_requested) {
    return;
  }
  set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
  clear_CMS_flag(CMS_cms_wants_token);
  assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
         "Should not be set");
}

// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier.
void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
  // Wait time in millis; a value of 0 means wait indefinitely for a scavenge.
  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  double start_time_secs = os::elapsedTime();
  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));

  // Total collections count before waiting loop
  unsigned int before_count;
  {
    MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    before_count = gch->total_collections();
  }

  unsigned int loop_count = 0;

  while (!should_terminate()) {
    double now_time = os::elapsedTime();
    long wait_time_millis;

    if (t_millis != 0) {
      // New wait limit
      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
      if (wait_time_millis <= 0) {
        // Wait time is over
        break;
      }
    } else {
      // No wait limit, wait forever if necessary
      wait_time_millis = 0;
    }

    // Wait until the next event or the remaining timeout
    {
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);

      if (should_terminate() || _collector->_full_gc_requested) {
        return;
      }
      set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
      CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
      clear_CMS_flag(CMS_cms_wants_token);
      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
             "Should not be set");
    }

    // Extra wait time check before entering the heap lock to get the collection count
    if (t_millis != 0 && os::elapsedTime() >= end_time_secs) {
      // Wait time is over
      break;
    }

    // Total collections count after the event
    unsigned int after_count;
    {
      MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
      after_count = gch->total_collections();
    }

    if (before_count != after_count) {
      // There was a collection - success
      break;
    }

    // Warn if the loop count has wrapped around (an extremely large number of iterations)
    if (++loop_count == 0) {
      log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
    }
  }
}

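// Wait between background collection cycles: return when the collector says
// a new concurrent cycle should start (shouldConcurrentCollect()) or when the
// thread is asked to terminate.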
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
  while (!should_terminate()) {
    if (CMSWaitDuration >= 0) {
      // Wait until the next synchronous GC, a concurrent full gc
      // request or a timeout, whichever is earlier.
      wait_on_cms_lock_for_scavenge(CMSWaitDuration);
    } else {
      // Wait for any cms_lock event, with a timeout of CMSCheckInterval,
      // so that shouldConcurrentCollect() is not called continuously.
      wait_on_cms_lock(CMSCheckInterval);
    }
    // Check if we should start a CMS collection cycle
    if (_collector->shouldConcurrentCollect()) {
      return;
    }
    // Collection criterion not yet met; go back and wait some more.
  }
}