safepoint.cpp revision 9173:3f28db271235
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/stringTable.hpp"
27#include "classfile/systemDictionary.hpp"
28#include "code/codeCache.hpp"
29#include "code/icBuffer.hpp"
30#include "code/nmethod.hpp"
31#include "code/pcDesc.hpp"
32#include "code/scopeDesc.hpp"
33#include "gc/shared/collectedHeap.hpp"
34#include "gc/shared/gcLocker.inline.hpp"
35#include "interpreter/interpreter.hpp"
36#include "memory/resourceArea.hpp"
37#include "memory/universe.inline.hpp"
38#include "oops/oop.inline.hpp"
39#include "oops/symbol.hpp"
40#include "runtime/atomic.inline.hpp"
41#include "runtime/compilationPolicy.hpp"
42#include "runtime/deoptimization.hpp"
43#include "runtime/frame.inline.hpp"
44#include "runtime/interfaceSupport.hpp"
45#include "runtime/mutexLocker.hpp"
46#include "runtime/orderAccess.inline.hpp"
47#include "runtime/osThread.hpp"
48#include "runtime/safepoint.hpp"
49#include "runtime/signature.hpp"
50#include "runtime/stubCodeGenerator.hpp"
51#include "runtime/stubRoutines.hpp"
52#include "runtime/sweeper.hpp"
53#include "runtime/synchronizer.hpp"
54#include "runtime/thread.inline.hpp"
55#include "services/runtimeService.hpp"
56#include "utilities/events.hpp"
57#include "utilities/macros.hpp"
58#if INCLUDE_ALL_GCS
59#include "gc/cms/concurrentMarkSweepThread.hpp"
60#include "gc/g1/suspendibleThreadSet.hpp"
61#endif // INCLUDE_ALL_GCS
62#ifdef COMPILER1
63#include "c1/c1_globals.hpp"
64#endif
65
66// --------------------------------------------------------------------------------------------------
67// Implementation of Safepoint begin/end
68
69SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
70volatile int  SafepointSynchronize::_waiting_to_block = 0;
71volatile int SafepointSynchronize::_safepoint_counter = 0;
72int SafepointSynchronize::_current_jni_active_count = 0;
73long  SafepointSynchronize::_end_of_last_safepoint = 0;
74static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
75static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
76static bool timeout_error_printed = false;
77
78// Roll all threads forward to a safepoint and suspend them all
79void SafepointSynchronize::begin() {
80
81  Thread* myThread = Thread::current();
82  assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
83
84  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
85    _safepoint_begin_time = os::javaTimeNanos();
86    _ts_of_current_safepoint = tty->time_stamp().seconds();
87  }
88
89#if INCLUDE_ALL_GCS
90  if (UseConcMarkSweepGC) {
91    // In the future we should investigate whether CMS can use the
92    // more-general mechanism below.  DLD (01/05).
93    ConcurrentMarkSweepThread::synchronize(false);
94  } else if (UseG1GC) {
95    SuspendibleThreadSet::synchronize();
96  }
97#endif // INCLUDE_ALL_GCS
98
99  // By getting the Threads_lock, we assure that no threads are about to start or
100  // exit. It is released again in SafepointSynchronize::end().
101  Threads_lock->lock();
102
103  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
104
105  int nof_threads = Threads::number_of_threads();
106
107  if (TraceSafepoint) {
108    tty->print_cr("Safepoint synchronization initiated. (%d)", nof_threads);
109  }
110
111  RuntimeService::record_safepoint_begin();
112
113  MutexLocker mu(Safepoint_lock);
114
115  // Reset the count of active JNI critical threads
116  _current_jni_active_count = 0;
117
118  // Set number of threads to wait for, before we initiate the callbacks
119  _waiting_to_block = nof_threads;
120  TryingToBlock     = 0 ;
121  int still_running = nof_threads;
122
123  // Save the starting time, so that it can be compared to see if this has taken
124  // too long to complete.
125  jlong safepoint_limit_time;
126  timeout_error_printed = false;
127
128  // PrintSafepointStatisticsTimeout can be specified separately. When
129  // specified, PrintSafepointStatistics will be set to true in
130  // deferred_initialize_stat method. The initialization has to be done
131  // early enough to avoid any races. See bug 6880029 for details.
132  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
133    deferred_initialize_stat();
134  }
135
136  // Begin the process of bringing the system to a safepoint.
137  // Java threads can be in several different states and are
138  // stopped by different mechanisms:
139  //
140  //  1. Running interpreted
141  //     The interpreter dispatch table is changed to force it to
142  //     check for a safepoint condition between bytecodes.
143  //  2. Running in native code
144  //     When returning from the native code, a Java thread must check
145  //     the safepoint _state to see if we must block.  If the
146  //     VM thread sees a Java thread in native, it does
147  //     not wait for this thread to block.  The order of the memory
148  //     writes and reads of both the safepoint state and the Java
149  //     threads state is critical.  In order to guarantee that the
150  //     memory writes are serialized with respect to each other,
151  //     the VM thread issues a memory barrier instruction
152  //     (on MP systems).  In order to avoid the overhead of issuing
153  //     a memory barrier for each Java thread making native calls, each Java
154  //     thread performs a write to a single memory page after changing
155  //     the thread state.  The VM thread performs a sequence of
156  //     mprotect OS calls which forces all previous writes from all
157  //     Java threads to be serialized.  This is done in the
158  //     os::serialize_thread_states() call.  This has proven to be
159  //     much more efficient than executing a membar instruction
160  //     on every call to native code.
161  //  3. Running compiled Code
162  //     Compiled code reads a global (Safepoint Polling) page that
163  //     is set to fault if we are trying to get to a safepoint.
164  //  4. Blocked
165  //     A thread which is blocked will not be allowed to return from the
166  //     block condition until the safepoint operation is complete.
167  //  5. In VM or Transitioning between states
168  //     If a Java thread is currently running in the VM or transitioning
169  //     between states, the safepointing code will wait for the thread to
170  //     block itself when it attempts transitions to a new state.
171  //
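  // Note on mechanism 3: once the polling page is made unreadable (either just
  // below or later in the spin loop), a compiled-code poll faults and the
  // resulting trap is routed to SafepointSynchronize::handle_polling_page_exception()
  // for the faulting thread.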
172  _state            = _synchronizing;
173  OrderAccess::fence();
174
175  // Flush all thread states to memory
176  if (!UseMembar) {
177    os::serialize_thread_states();
178  }
179
180  // Make interpreter safepoint aware
181  Interpreter::notice_safepoints();
182
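  // A negative DeferPollingPageLoopCount means "arm the polling page right away";
  // otherwise the page is armed only once the spin loop below has run that many
  // iterations (see the matching check on 'iterations' further down).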
183  if (DeferPollingPageLoopCount < 0) {
184    // Make polling safepoint aware
185    guarantee (PageArmed == 0, "invariant") ;
186    PageArmed = 1 ;
187    os::make_polling_page_unreadable();
188  }
189
190  // Consider using active_processor_count() ... but that call is expensive.
191  int ncpus = os::processor_count() ;
192
193#ifdef ASSERT
194  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
195    assert(cur->safepoint_state()->is_running(), "Illegal initial state");
196    // Clear the visited flag to ensure that the critical counts are collected properly.
197    cur->set_visited_for_critical_count(false);
198  }
199#endif // ASSERT
200
201  if (SafepointTimeout)
202    safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
203
204  // Iterate through all threads until it has been determined how to stop them all at a safepoint
205  unsigned int iterations = 0;
206  int steps = 0 ;
207  while(still_running > 0) {
208    for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
209      assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
210      ThreadSafepointState *cur_state = cur->safepoint_state();
211      if (cur_state->is_running()) {
212        cur_state->examine_state_of_thread();
213        if (!cur_state->is_running()) {
214           still_running--;
215           // consider adjusting steps downward:
216           //   steps = 0
217           //   steps -= NNN
218           //   steps >>= 1
219           //   steps = MIN(steps, 2000-100)
220           //   if (iterations != 0) steps -= NNN
221        }
222        if (TraceSafepoint && Verbose) cur_state->print();
223      }
224    }
225
226    if (PrintSafepointStatistics && iterations == 0) {
227      begin_statistics(nof_threads, still_running);
228    }
229
230    if (still_running > 0) {
231      // Check if it is taking too long
232      if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
233        print_safepoint_timeout(_spinning_timeout);
234      }
235
236      // Spin to avoid context switching.
237      // There's a tension between allowing the mutators to run (and rendezvous)
238      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
239      // a mutator might otherwise use profitably to reach a safepoint.  Excessive
240      // spinning by the VM thread on a saturated system can increase rendezvous latency.
241      // Blocking or yielding incur their own penalties in the form of context switching
242      // and the resultant loss of $ residency.
243      //
244      // Further complicating matters is that yield() does not work as naively expected
245      // on many platforms -- yield() does not guarantee that any other ready threads
246      // will run.   As such we revert to naked_short_sleep() after some number of iterations.
247      // naked_short_sleep() is implemented as a short unconditional sleep.
248      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
249      // can actually increase the time it takes the VM thread to detect that a system-wide
250      // stop-the-world safepoint has been reached.  In a pathological scenario such as that
251      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
252      // In that case the mutators will be stalled waiting for the safepoint to complete and the
253      // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
254      // will eventually wake up and detect that all mutators are safe, at which point
255      // we'll again make progress.
256      //
257      // Beware too that the VMThread typically runs at elevated priority.
258      // Its default priority is higher than the default mutator priority.
259      // Obviously, this complicates spinning.
260      //
261      // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
262      // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
263      //
264      // See the comments in synchronizer.cpp for additional remarks on spinning.
265      //
266      // In the future we might:
267      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
268      //    This is tricky as the path used by a thread exiting the JVM (say on
269      //    a JNI call-out) simply stores into its state field.  The burden
270      //    is placed on the VM thread, which must poll (spin).
271      // 2. Find something useful to do while spinning.  If the safepoint is GC-related
272      //    we might aggressively scan the stacks of threads that are already safe.
273      // 3. Use Solaris schedctl to examine the state of the still-running mutators.
274      //    If all the mutators are ONPROC there's no reason to sleep or yield.
275      // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
276      // 5. Check system saturation.  If the system is not fully saturated then
277      //    simply spin and avoid sleep/yield.
278      // 6. As still-running mutators rendezvous they could unpark the sleeping
279      //    VMthread.  This works well for still-running mutators that become
280      //    safe.  The VMthread must still poll for mutators that call-out.
281      // 7. Drive the policy on time-since-begin instead of iterations.
282      // 8. Consider making the spin duration a function of the # of CPUs:
283      //    Spin = (((ncpus-1) * M) + K) + F(still_running)
284      //    Alternately, instead of counting iterations of the outer loop
285      //    we could count the # of threads visited in the inner loop, above.
286      // 9. On windows consider using the return value from SwitchThreadTo()
287      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
288
289      if (int(iterations) == DeferPollingPageLoopCount) {
290         guarantee (PageArmed == 0, "invariant") ;
291         PageArmed = 1 ;
292         os::make_polling_page_unreadable();
293      }
294
295      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
296      // ((still_running + _waiting_to_block - TryingToBlock) < ncpus)
297      ++steps ;
298      if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
299        SpinPause() ;     // MP-Polite spin
300      } else
301      if (steps < DeferThrSuspendLoopCount) {
302        os::naked_yield() ;
303      } else {
304        os::naked_short_sleep(1);
305      }
306
307      iterations ++ ;
308    }
309    assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
310  }
311  assert(still_running == 0, "sanity check");
312
313  if (PrintSafepointStatistics) {
314    update_statistics_on_spin_end();
315  }
316
317  // wait until all threads are stopped
318  while (_waiting_to_block > 0) {
319    if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
320    if (!SafepointTimeout || timeout_error_printed) {
321      Safepoint_lock->wait(true);  // true, means with no safepoint checks
322    } else {
323      // Compute remaining time
324      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
325
326      // If there is no remaining time, then there is an error
327      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
328        print_safepoint_timeout(_blocking_timeout);
329      }
330    }
331  }
332  assert(_waiting_to_block == 0, "sanity check");
333
334#ifndef PRODUCT
335  if (SafepointTimeout) {
336    jlong current_time = os::javaTimeNanos();
337    if (safepoint_limit_time < current_time) {
338      tty->print_cr("# SafepointSynchronize: Finished after "
339                    INT64_FORMAT_W(6) " ms",
340                    ((current_time - safepoint_limit_time) / MICROUNITS +
341                     (jlong)SafepointTimeoutDelay));
342    }
343  }
344#endif
345
346  assert((_safepoint_counter & 0x1) == 0, "must be even");
347  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
348  _safepoint_counter ++;
349
350  // Record state
351  _state = _synchronized;
352
353  OrderAccess::fence();
354
355#ifdef ASSERT
356  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
357    // make sure all the threads were visited
358    assert(cur->was_visited_for_critical_count(), "missed a thread");
359  }
360#endif // ASSERT
361
362  // Update the count of active JNI critical regions
363  GC_locker::set_jni_lock_count(_current_jni_active_count);
364
365  if (TraceSafepoint) {
366    VM_Operation *op = VMThread::vm_operation();
367    tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
368  }
369
370  RuntimeService::record_safepoint_synchronized();
371  if (PrintSafepointStatistics) {
372    update_statistics_on_sync_end(os::javaTimeNanos());
373  }
374
375  // Call stuff that needs to be run when a safepoint is just about to be completed
376  do_cleanup_tasks();
377
378  if (PrintSafepointStatistics) {
379    // Record how much time was spent on the above cleanup tasks
380    update_statistics_on_cleanup_end(os::javaTimeNanos());
381  }
382}
383
384// Wake up all threads, so they are ready to resume execution after the safepoint
385// operation has been carried out
386void SafepointSynchronize::end() {
387
388  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
389  assert((_safepoint_counter & 0x1) == 1, "must be odd");
390  _safepoint_counter ++;
391  // memory fence isn't required here since an odd _safepoint_counter
392  // value can do no harm and a fence is issued below anyway.
393
394  DEBUG_ONLY(Thread* myThread = Thread::current();)
395  assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
396
397  if (PrintSafepointStatistics) {
398    end_statistics(os::javaTimeNanos());
399  }
400
401#ifdef ASSERT
402  // A pending_exception cannot be installed during a safepoint.  The threads
403  // may install an async exception after they come back from a safepoint into
404  // pending_exception after they unblock.  But that should happen later.
405  for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
406    assert (!(cur->has_pending_exception() &&
407              cur->safepoint_state()->is_at_poll_safepoint()),
408            "safepoint installed a pending exception");
409  }
410#endif // ASSERT
411
412  if (PageArmed) {
413    // Make polling safepoint aware
414    os::make_polling_page_readable();
415    PageArmed = 0 ;
416  }
417
418  // Remove safepoint check from interpreter
419  Interpreter::ignore_safepoints();
420
421  {
422    MutexLocker mu(Safepoint_lock);
423
424    assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
425
426    // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
427    // when they get restarted.
428    _state = _not_synchronized;
429    OrderAccess::fence();
430
431    if (TraceSafepoint) {
432       tty->print_cr("Leaving safepoint region");
433    }
434
435    // Start suspended threads
436    for(JavaThread *current = Threads::first(); current; current = current->next()) {
437      // A problem occurring on Solaris is when attempting to restart threads
438      // the first #cpus - 1 go well, but then the VMThread is preempted when we get
439      // to the next one (since it has been running the longest).  We then have
440      // to wait for a cpu to become available before we can continue restarting
441      // threads.
442      // FIXME: This causes the performance of the VM to degrade when active and with
443      // large numbers of threads.  Apparently this is due to the synchronous nature
444      // of suspending threads.
445      //
446      // TODO-FIXME: the comments above are vestigial and no longer apply.
447      // Furthermore, using solaris' schedctl in this particular context confers no benefit
448      if (VMThreadHintNoPreempt) {
449        os::hint_no_preempt();
450      }
451      ThreadSafepointState* cur_state = current->safepoint_state();
452      assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
453      cur_state->restart();
454      assert(cur_state->is_running(), "safepoint state has not been reset");
455    }
456
457    RuntimeService::record_safepoint_end();
458
459    // Release threads lock, so threads can be created/destroyed again. It will also start all threads
460    // blocked in signal_thread_blocked
461    Threads_lock->unlock();
462
463  }
464#if INCLUDE_ALL_GCS
465  // If there are any concurrent GC threads resume them.
466  if (UseConcMarkSweepGC) {
467    ConcurrentMarkSweepThread::desynchronize(false);
468  } else if (UseG1GC) {
469    SuspendibleThreadSet::desynchronize();
470  }
471#endif // INCLUDE_ALL_GCS
472  // record this time so VMThread can keep track how much time has elapsed
473  // since last safepoint.
474  _end_of_last_safepoint = os::javaTimeMillis();
475}
476
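// Returns true if a cleanup safepoint would currently be useful; at the moment
// this only checks whether the inline cache buffer has entries to flush.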
477bool SafepointSynchronize::is_cleanup_needed() {
478  // Need a safepoint if the inline cache buffer is non-empty
479  if (!InlineCacheBuffer::is_empty()) return true;
480  return false;
481}
482
483
484
485// Various cleaning tasks that should be done periodically at safepoints
486void SafepointSynchronize::do_cleanup_tasks() {
487  {
488    TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);
489    ObjectSynchronizer::deflate_idle_monitors();
490  }
491
492  {
493    TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
494    InlineCacheBuffer::update_inline_caches();
495  }
496  {
497    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
498    CompilationPolicy::policy()->do_safepoint_work();
499  }
500
501  {
502    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
503    NMethodSweeper::mark_active_nmethods();
504  }
505
506  if (SymbolTable::needs_rehashing()) {
507    TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);
508    SymbolTable::rehash_table();
509  }
510
511  if (StringTable::needs_rehashing()) {
512    TraceTime t6("rehashing string table", TraceSafepointCleanupTime);
513    StringTable::rehash_table();
514  }
515
516  // rotate log files?
517  if (UseGCLogFileRotation) {
518    gclog_or_tty->rotate_log(false);
519  }
520
521  {
522    // CMS delays purging the CLDG until the beginning of the next safepoint to
523    // make sure the concurrent sweep is done
524    TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
525    ClassLoaderDataGraph::purge_if_needed();
526  }
527}
528
529
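// Returns true if a thread in the given state can be treated as being at a
// safepoint without stopping it: it is either running native code with no Java
// frames (or a walkable last Java frame), or it is already blocked.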
530bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
531  switch(state) {
532  case _thread_in_native:
533    // native threads are safe if they have no Java stack or have a walkable stack
534    return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
535
536   // blocked threads should already have a walkable stack
537  case _thread_blocked:
538    assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
539    return true;
540
541  default:
542    return false;
543  }
544}
545
546
547// See if the thread is running inside a lazy critical native and
548// update the thread critical count if so.  Also set a suspend flag to
549// cause the native wrapper to return into the JVM to do the unlock
550// once the native finishes.
551void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
552  if (state == _thread_in_native &&
553      thread->has_last_Java_frame() &&
554      thread->frame_anchor()->walkable()) {
555    // This thread might be in a critical native nmethod so look at
556    // the top of the stack and increment the critical count if it
557    // is.
558    frame wrapper_frame = thread->last_frame();
559    CodeBlob* stub_cb = wrapper_frame.cb();
560    if (stub_cb != NULL &&
561        stub_cb->is_nmethod() &&
562        stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) {
563      // A thread could potentially be in a critical native across
564      // more than one safepoint, so only update the critical state on
565      // the first one.  When it returns it will perform the unlock.
566      if (!thread->do_critical_native_unlock()) {
567#ifdef ASSERT
568        if (!thread->in_critical()) {
569          GC_locker::increment_debug_jni_lock_count();
570        }
571#endif
572        thread->enter_critical();
573        // Make sure the native wrapper calls back on return to
574        // perform the needed critical unlock.
575        thread->set_critical_native_unlock();
576      }
577    }
578  }
579}
580
581
582
583// -------------------------------------------------------------------------------------------------------
584// Implementation of Safepoint callback point
585
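// Called by a Java thread that has noticed a pending safepoint, either during a
// thread state transition or after a polling page fault. The thread reports
// itself as blocked (decrementing _waiting_to_block when it owes a call-back)
// and then parks on Threads_lock, which the VM thread holds until
// SafepointSynchronize::end() releases it.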
586void SafepointSynchronize::block(JavaThread *thread) {
587  assert(thread != NULL, "thread must be set");
588  assert(thread->is_Java_thread(), "not a Java thread");
589
590  // Threads shouldn't block if they are in the middle of printing, but...
591  ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());
592
593  // Only bail from the block() call if the thread is gone from the
594  // thread list; starting to exit should still block.
595  if (thread->is_terminated()) {
596     // block current thread if we come here from native code when VM is gone
597     thread->block_if_vm_exited();
598
599     // otherwise do nothing
600     return;
601  }
602
603  JavaThreadState state = thread->thread_state();
604  thread->frame_anchor()->make_walkable(thread);
605
606  // Check that we have a valid thread_state at this point
607  switch(state) {
608    case _thread_in_vm_trans:
609    case _thread_in_Java:        // From compiled code
610
611      // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
612      // we pretend we are still in the VM.
613      thread->set_thread_state(_thread_in_vm);
614
615      if (is_synchronizing()) {
616         Atomic::inc (&TryingToBlock) ;
617      }
618
619      // We will always be holding the Safepoint_lock when we examine the state
620      // of a thread. Hence, the instructions between the Safepoint_lock->lock() and
621      // Safepoint_lock->unlock() execute atomically with regard to the safepoint code
622      Safepoint_lock->lock_without_safepoint_check();
623      if (is_synchronizing()) {
624        // Decrement the number of threads to wait for and signal vm thread
625        assert(_waiting_to_block > 0, "sanity check");
626        _waiting_to_block--;
627        thread->safepoint_state()->set_has_called_back(true);
628
629        DEBUG_ONLY(thread->set_visited_for_critical_count(true));
630        if (thread->in_critical()) {
631          // Notice that this thread is in a critical section
632          increment_jni_active_count();
633        }
634
635        // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
636        if (_waiting_to_block == 0) {
637          Safepoint_lock->notify_all();
638        }
639      }
640
641      // We transition the thread to state _thread_blocked here, but
642      // we can't do our usual check for external suspension and then
643      // self-suspend after the lock_without_safepoint_check() call
644      // below because we are often called during transitions while
645      // we hold different locks. That would leave us suspended while
646      // holding a resource which results in deadlocks.
647      thread->set_thread_state(_thread_blocked);
648      Safepoint_lock->unlock();
649
650      // We now try to acquire the threads lock. Since this lock is held by the VM thread during
651      // the entire safepoint, the threads will all line up here during the safepoint.
652      Threads_lock->lock_without_safepoint_check();
653      // restore original state. This is important if the thread comes from compiled code, so it
654      // will continue to execute with the _thread_in_Java state.
655      thread->set_thread_state(state);
656      Threads_lock->unlock();
657      break;
658
659    case _thread_in_native_trans:
660    case _thread_blocked_trans:
661    case _thread_new_trans:
662      if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back) {
663        thread->print_thread_state();
664        fatal("Deadlock in safepoint code.  "
665              "Should have called back to the VM before blocking.");
666      }
667
668      // We transition the thread to state _thread_blocked here, but
669      // we can't do our usual check for external suspension and then
670      // self-suspend after the lock_without_safepoint_check() call
671      // below because we are often called during transitions while
672      // we hold different locks. That would leave us suspended while
673      // holding a resource which results in deadlocks.
674      thread->set_thread_state(_thread_blocked);
675
676      // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans. Hence,
677      // the safepoint code might still be waiting for it to block. We need to change the state here,
678      // so it can see that it is at a safepoint.
679
680      // Block until the safepoint operation is completed.
681      Threads_lock->lock_without_safepoint_check();
682
683      // Restore state
684      thread->set_thread_state(state);
685
686      Threads_lock->unlock();
687      break;
688
689    default:
690     fatal("Illegal threadstate encountered: %d", state);
691  }
692
693  // Check for pending async exceptions or suspends - except if the
694  // thread was blocked inside the VM. has_special_runtime_exit_condition()
695  // is called last since it grabs a lock and we only want to do that when
696  // we must.
697  //
698  // Note: we never deliver an async exception at a polling point as the
699  // compiler may not have an exception handler for it. The polling
700  // code will notice the async and deoptimize and the exception will
701  // be delivered. (Polling at a return point is ok though). Sure is
702  // a lot of bother for a deprecated feature...
703  //
704  // We don't deliver an async exception if the thread state is
705  // _thread_in_native_trans so JNI functions won't be called with
706  // a surprising pending exception. If the thread state is going back to java,
707  // async exception is checked in check_special_condition_for_native_trans().
708
709  if (state != _thread_blocked_trans &&
710      state != _thread_in_vm_trans &&
711      thread->has_special_runtime_exit_condition()) {
712    thread->handle_special_runtime_exit_condition(
713      !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
714  }
715}
716
717// ------------------------------------------------------------------------------------------------------
718// Exception handlers
719
720
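// Entry point used when compiled code traps on the protected polling page;
// this simply forwards to the per-thread ThreadSafepointState handler below.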
721void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
722  assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
723  assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
724  assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
725
726  if (ShowSafepointMsgs) {
727    tty->print("handle_polling_page_exception: ");
728  }
729
730  if (PrintSafepointStatistics) {
731    inc_page_trap_count();
732  }
733
734  ThreadSafepointState* state = thread->safepoint_state();
735
736  state->handle_polling_page_exception();
737}
738
739
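// Called when a safepoint has taken longer than SafepointTimeoutDelay, either
// while spinning or while waiting for threads to block. Prints the threads that
// have not reached the safepoint and, if DieOnSafepointTimeout is set, aborts.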
740void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
741  if (!timeout_error_printed) {
742    timeout_error_printed = true;
743    // Print out the thread info which didn't reach the safepoint for debugging
744    // purposes (useful when there are lots of threads in the debugger).
745    tty->cr();
746    tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
747    if (reason ==  _spinning_timeout) {
748      tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
749    } else if (reason == _blocking_timeout) {
750      tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop.");
751    }
752
753    tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
754    ThreadSafepointState *cur_state;
755    ResourceMark rm;
756    for(JavaThread *cur_thread = Threads::first(); cur_thread;
757        cur_thread = cur_thread->next()) {
758      cur_state = cur_thread->safepoint_state();
759
760      if (cur_thread->thread_state() != _thread_blocked &&
761          ((reason == _spinning_timeout && cur_state->is_running()) ||
762           (reason == _blocking_timeout && !cur_state->has_called_back()))) {
763        tty->print("# ");
764        cur_thread->print();
765        tty->cr();
766      }
767    }
768    tty->print_cr("# SafepointSynchronize::begin: (End of list)");
769  }
770
771  // To debug the long safepoint, specify both DieOnSafepointTimeout &
772  // ShowMessageBoxOnError.
773  if (DieOnSafepointTimeout) {
774    VM_Operation *op = VMThread::vm_operation();
775    fatal("Safepoint sync time longer than " INTX_FORMAT "ms detected when executing %s.",
776          SafepointTimeoutDelay,
777          op != NULL ? op->name() : "no vm operation");
778  }
779}
780
781
782// -------------------------------------------------------------------------------------------------------
783// Implementation of ThreadSafepointState
784
785ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
786  _thread = thread;
787  _type   = _running;
788  _has_called_back = false;
789  _at_poll_safepoint = false;
790}
791
792void ThreadSafepointState::create(JavaThread *thread) {
793  ThreadSafepointState *state = new ThreadSafepointState(thread);
794  thread->set_safepoint_state(state);
795}
796
797void ThreadSafepointState::destroy(JavaThread *thread) {
798  if (thread->safepoint_state()) {
799    delete(thread->safepoint_state());
800    thread->set_safepoint_state(NULL);
801  }
802}
803
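// Called by the VM thread (holding Threads_lock) for each still-running thread:
// decide whether the thread is already effectively at a safepoint
// (_at_safepoint), must call back into the VM itself (_call_back), or has to
// keep running until it blocks on its own.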
804void ThreadSafepointState::examine_state_of_thread() {
805  assert(is_running(), "better be running or just have hit safepoint poll");
806
807  JavaThreadState state = _thread->thread_state();
808
809  // Save the state at the start of safepoint processing.
810  _orig_thread_state = state;
811
812  // Check for a thread that is suspended. Note that thread resume tries
813  // to grab the Threads_lock which we own here, so a thread cannot be
814  // resumed during safepoint synchronization.
815
816  // We check to see if this thread is suspended without locking to
817  // avoid deadlocking with a third thread that is waiting for this
818  // thread to be suspended. The third thread can notice the safepoint
819  // that we're trying to start at the beginning of its SR_lock->wait()
820  // call. If that happens, then the third thread will block on the
821  // safepoint while still holding the underlying SR_lock. We won't be
822  // able to get the SR_lock and we'll deadlock.
823  //
824  // We don't need to grab the SR_lock here for two reasons:
825  // 1) The suspend flags are both volatile and are set with an
826  //    Atomic::cmpxchg() call so we should see the suspended
827  //    state right away.
828  // 2) We're being called from the safepoint polling loop; if
829  //    we don't see the suspended state on this iteration, then
830  //    we'll come around again.
831  //
832  bool is_suspended = _thread->is_ext_suspended();
833  if (is_suspended) {
834    roll_forward(_at_safepoint);
835    return;
836  }
837
838  // Some JavaThread states have an initial safepoint state of
839  // running, but are actually at a safepoint. We will happily
840  // agree and update the safepoint state here.
841  if (SafepointSynchronize::safepoint_safe(_thread, state)) {
842    SafepointSynchronize::check_for_lazy_critical_native(_thread, state);
843    roll_forward(_at_safepoint);
844    return;
845  }
846
847  if (state == _thread_in_vm) {
848    roll_forward(_call_back);
849    return;
850  }
851
852  // All other thread states will continue to run until they
853  // transition and self-block in state _blocked
854  // Safepoint polling in compiled code causes the Java threads to do the same.
855  // Note: new threads may require a malloc so they must be allowed to finish
856
857  assert(is_running(), "examine_state_of_thread on non-running thread");
858  return;
859}
860
861// Roll the thread's safepoint state forward to the given suspend type.
862void ThreadSafepointState::roll_forward(suspend_type type) {
863  _type = type;
864
865  switch(_type) {
866    case _at_safepoint:
867      SafepointSynchronize::signal_thread_at_safepoint();
868      DEBUG_ONLY(_thread->set_visited_for_critical_count(true));
869      if (_thread->in_critical()) {
870        // Notice that this thread is in a critical section
871        SafepointSynchronize::increment_jni_active_count();
872      }
873      break;
874
875    case _call_back:
876      set_has_called_back(false);
877      break;
878
879    case _running:
880    default:
881      ShouldNotReachHere();
882  }
883}
884
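// Called from SafepointSynchronize::end() to return this thread's safepoint
// state to _running once the safepoint operation has completed.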
885void ThreadSafepointState::restart() {
886  switch(type()) {
887    case _at_safepoint:
888    case _call_back:
889      break;
890
891    case _running:
892    default:
893       tty->print_cr("restart thread " INTPTR_FORMAT " with state %d",
894                     p2i(_thread), _type);
895       _thread->print();
896      ShouldNotReachHere();
897  }
898  _type = _running;
899  set_has_called_back(false);
900}
901
902
903void ThreadSafepointState::print_on(outputStream *st) const {
904  const char *s;
905
906  switch(_type) {
907    case _running                : s = "_running";              break;
908    case _at_safepoint           : s = "_at_safepoint";         break;
909    case _call_back              : s = "_call_back";            break;
910    default:
911      ShouldNotReachHere();
912  }
913
914  st->print_cr("Thread: " INTPTR_FORMAT
915              "  [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
916               p2i(_thread), _thread->osthread()->thread_id(), s, _has_called_back,
917               _at_poll_safepoint);
918
919  _thread->print_thread_state_on(st);
920}
921
922
923// ---------------------------------------------------------------------------------------------------------------------
924
925// Block the thread at the safepoint poll or poll return.
926void ThreadSafepointState::handle_polling_page_exception() {
927
928  // Check state.  block() will set thread state to thread_in_vm which will
929  // cause the safepoint state _type to become _call_back.
930  assert(type() == ThreadSafepointState::_running,
931         "polling page exception on thread not running state");
932
933  // Step 1: Find the nmethod from the return address
934  if (ShowSafepointMsgs && Verbose) {
935    tty->print_cr("Polling page exception at " INTPTR_FORMAT, p2i(thread()->saved_exception_pc()));
936  }
937  address real_return_addr = thread()->saved_exception_pc();
938
939  CodeBlob *cb = CodeCache::find_blob(real_return_addr);
940  assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
941  nmethod* nm = (nmethod*)cb;
942
943  // Find frame of caller
944  frame stub_fr = thread()->last_frame();
945  CodeBlob* stub_cb = stub_fr.cb();
946  assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
947  RegisterMap map(thread(), true);
948  frame caller_fr = stub_fr.sender(&map);
949
950  // Should only be poll_return or poll
951  assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" );
952
953  // This is a poll immediately before a return. The exception handling code
954  // has already had the effect of causing the return to occur, so the execution
955  // will continue immediately after the call. In addition, the oopmap at the
956    // return point does not mark the return value as an oop (if it is one), so
957    // it needs to be preserved in a handle here so it can be updated across a GC.
958  if( nm->is_at_poll_return(real_return_addr) ) {
959    // See if return type is an oop.
960    bool return_oop = nm->method()->is_returning_oop();
961    Handle return_value;
962    if (return_oop) {
963      // The oop result has been saved on the stack together with all
964      // the other registers. In order to preserve it over GCs we need
965      // to keep it in a handle.
966      oop result = caller_fr.saved_oop_result(&map);
967      assert(result == NULL || result->is_oop(), "must be oop");
968      return_value = Handle(thread(), result);
969      assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
970    }
971
972    // Block the thread
973    SafepointSynchronize::block(thread());
974
975    // restore oop result, if any
976    if (return_oop) {
977      caller_fr.set_saved_oop_result(&map, return_value());
978    }
979  }
980
981  // This is a safepoint poll. Verify the return address and block.
982  else {
983    set_at_poll_safepoint(true);
984
985    // verify the blob built the "return address" correctly
986    assert(real_return_addr == caller_fr.pc(), "must match");
987
988    // Block the thread
989    SafepointSynchronize::block(thread());
990    set_at_poll_safepoint(false);
991
992    // If we have a pending async exception deoptimize the frame
993    // as otherwise we may never deliver it.
994    if (thread()->has_async_condition()) {
995      ThreadInVMfromJavaNoAsyncException __tiv(thread());
996      Deoptimization::deoptimize_frame(thread(), caller_fr.id());
997    }
998
999    // If an exception has been installed we must check for a pending deoptimization
1000    // Deoptimize frame if exception has been thrown.
1001
1002    if (thread()->has_pending_exception() ) {
1003      RegisterMap map(thread(), true);
1004      frame caller_fr = stub_fr.sender(&map);
1005      if (caller_fr.is_deoptimized_frame()) {
1006        // The exception patch will destroy registers that are still
1007        // live and will be needed during deoptimization, so the async
1008        // exception should have been deferred until the next safepoint,
1009        // which will be detected when we get into
1010        // the interpreter. If we have an exception installed now, things
1011        // are messed up.
1012
1013        fatal("Exception installed and deoptimization is pending");
1014      }
1015    }
1016  }
1017}
1018
1019
1020//
1021//                     Statistics & Instrumentations
1022//
1023SafepointSynchronize::SafepointStats*  SafepointSynchronize::_safepoint_stats = NULL;
1024jlong  SafepointSynchronize::_safepoint_begin_time = 0;
1025int    SafepointSynchronize::_cur_stat_index = 0;
1026julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
1027julong SafepointSynchronize::_coalesced_vmop_count = 0;
1028jlong  SafepointSynchronize::_max_sync_time = 0;
1029jlong  SafepointSynchronize::_max_vmop_time = 0;
1030float  SafepointSynchronize::_ts_of_current_safepoint = 0.0f;
1031
1032static jlong  cleanup_end_time = 0;
1033static bool   need_to_track_page_armed_status = false;
1034static bool   init_done = false;
1035
1036// Helper method to print the header.
1037static void print_header() {
1038  tty->print("         vmop                    "
1039             "[threads: total initially_running wait_to_block]    ");
1040  tty->print("[time: spin block sync cleanup vmop] ");
1041
1042  // No page armed status is printed out if the page is always armed.
1043  if (need_to_track_page_armed_status) {
1044    tty->print("page_armed ");
1045  }
1046
1047  tty->print_cr("page_trap_count");
1048}
1049
1050void SafepointSynchronize::deferred_initialize_stat() {
1051  if (init_done) return;
1052
1053  // If PrintSafepointStatisticsTimeout is specified, the statistics data will
1054  // be printed right away, in which case, _safepoint_stats will regress to
1055  // a single element array. Otherwise, it is a circular ring buffer with default
1056  // size of PrintSafepointStatisticsCount.
1057  int stats_array_size;
1058  if (PrintSafepointStatisticsTimeout > 0) {
1059    stats_array_size = 1;
1060    PrintSafepointStatistics = true;
1061  } else {
1062    stats_array_size = PrintSafepointStatisticsCount;
1063  }
1064  _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
1065                                                 * sizeof(SafepointStats), mtInternal);
1066  guarantee(_safepoint_stats != NULL,
1067            "not enough memory for safepoint instrumentation data");
1068
1069  if (DeferPollingPageLoopCount >= 0) {
1070    need_to_track_page_armed_status = true;
1071  }
1072  init_done = true;
1073}
1074
1075void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
1076  assert(init_done, "safepoint statistics array hasn't been initialized");
1077  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1078
1079  spstat->_time_stamp = _ts_of_current_safepoint;
1080
1081  VM_Operation *op = VMThread::vm_operation();
1082  spstat->_vmop_type = (op != NULL ? op->type() : -1);
1083  if (op != NULL) {
1084    _safepoint_reasons[spstat->_vmop_type]++;
1085  }
1086
1087  spstat->_nof_total_threads = nof_threads;
1088  spstat->_nof_initial_running_threads = nof_running;
1089  spstat->_nof_threads_hit_page_trap = 0;
1090
1091  // Records the start time of spinning. The real time spent on spinning
1092  // will be adjusted when spinning is done. The same trick is applied for the
1093  // time spent waiting for threads to block.
1094  if (nof_running != 0) {
1095    spstat->_time_to_spin = os::javaTimeNanos();
1096  }  else {
1097    spstat->_time_to_spin = 0;
1098  }
1099}
1100
1101void SafepointSynchronize::update_statistics_on_spin_end() {
1102  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1103
1104  jlong cur_time = os::javaTimeNanos();
1105
1106  spstat->_nof_threads_wait_to_block = _waiting_to_block;
1107  if (spstat->_nof_initial_running_threads != 0) {
1108    spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
1109  }
1110
1111  if (need_to_track_page_armed_status) {
1112    spstat->_page_armed = (PageArmed == 1);
1113  }
1114
1115  // Records the start time of waiting for threads to block. Updated when blocking is done.
1116  if (_waiting_to_block != 0) {
1117    spstat->_time_to_wait_to_block = cur_time;
1118  } else {
1119    spstat->_time_to_wait_to_block = 0;
1120  }
1121}
1122
1123void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) {
1124  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1125
1126  if (spstat->_nof_threads_wait_to_block != 0) {
1127    spstat->_time_to_wait_to_block = end_time -
1128      spstat->_time_to_wait_to_block;
1129  }
1130
1131  // Records the end time of sync which will be used to calculate the total
1132  // vm operation time. Again, the real time spent syncing will be deducted
1133  // from the start of the sync time later when end_statistics is called.
1134  spstat->_time_to_sync = end_time - _safepoint_begin_time;
1135  if (spstat->_time_to_sync > _max_sync_time) {
1136    _max_sync_time = spstat->_time_to_sync;
1137  }
1138
1139  spstat->_time_to_do_cleanups = end_time;
1140}
1141
1142void SafepointSynchronize::update_statistics_on_cleanup_end(jlong end_time) {
1143  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1144
1145  // Record how long spent in cleanup tasks.
1146  spstat->_time_to_do_cleanups = end_time - spstat->_time_to_do_cleanups;
1147
1148  cleanup_end_time = end_time;
1149}
1150
1151void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
1152  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1153
1154  // Update the vm operation time.
1155  spstat->_time_to_exec_vmop = vmop_end_time -  cleanup_end_time;
1156  if (spstat->_time_to_exec_vmop > _max_vmop_time) {
1157    _max_vmop_time = spstat->_time_to_exec_vmop;
1158  }
1159  // Only samples with a sync time longer than the specified
1160  // PrintSafepointStatisticsTimeout are printed out right away.
1161  // By default it is -1, meaning all samples are put into the list.
1162  if ( PrintSafepointStatisticsTimeout > 0) {
1163    if (spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
1164      print_statistics();
1165    }
1166  } else {
1167    // The safepoint statistics will be printed out when the _safepoint_stats
1168    // array fills up.
1169    if (_cur_stat_index == PrintSafepointStatisticsCount - 1) {
1170      print_statistics();
1171      _cur_stat_index = 0;
1172    } else {
1173      _cur_stat_index++;
1174    }
1175  }
1176}
1177
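// Prints the samples buffered so far, repeating the column header every 30 rows.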
1178void SafepointSynchronize::print_statistics() {
1179  SafepointStats* sstats = _safepoint_stats;
1180
1181  for (int index = 0; index <= _cur_stat_index; index++) {
1182    if (index % 30 == 0) {
1183      print_header();
1184    }
1185    sstats = &_safepoint_stats[index];
1186    tty->print("%.3f: ", sstats->_time_stamp);
1187    tty->print("%-26s       ["
1188               INT32_FORMAT_W(8) INT32_FORMAT_W(11) INT32_FORMAT_W(15)
1189               "    ]    ",
1190               sstats->_vmop_type == -1 ? "no vm operation" :
1191               VM_Operation::name(sstats->_vmop_type),
1192               sstats->_nof_total_threads,
1193               sstats->_nof_initial_running_threads,
1194               sstats->_nof_threads_wait_to_block);
1195    // "/ MICROUNITS " is to convert the unit from nanos to millis.
1196    tty->print("  ["
1197               INT64_FORMAT_W(6) INT64_FORMAT_W(6)
1198               INT64_FORMAT_W(6) INT64_FORMAT_W(6)
1199               INT64_FORMAT_W(6) "    ]  ",
1200               sstats->_time_to_spin / MICROUNITS,
1201               sstats->_time_to_wait_to_block / MICROUNITS,
1202               sstats->_time_to_sync / MICROUNITS,
1203               sstats->_time_to_do_cleanups / MICROUNITS,
1204               sstats->_time_to_exec_vmop / MICROUNITS);
1205
1206    if (need_to_track_page_armed_status) {
1207      tty->print(INT32_FORMAT "         ", sstats->_page_armed);
1208    }
1209    tty->print_cr(INT32_FORMAT "   ", sstats->_nof_threads_hit_page_trap);
1210  }
1211}
1212
1213// This method will be called when VM exits. It will first call
1214// print_statistics to print out the rest of the sampling.  Then
1215// it tries to summarize the sampling.
1216void SafepointSynchronize::print_stat_on_exit() {
1217  if (_safepoint_stats == NULL) return;
1218
1219  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1220
1221  // During VM exit, end_statistics may not get called and in that
1222  // case, if the sync time is less than PrintSafepointStatisticsTimeout,
1223  // don't print it out.
1224  // Approximate the vm op time.
1225  _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
1226    os::javaTimeNanos() - cleanup_end_time;
1227
1228  if ( PrintSafepointStatisticsTimeout < 0 ||
1229       spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
1230    print_statistics();
1231  }
1232  tty->cr();
1233
1234  // Print out polling page sampling status.
1235  if (!need_to_track_page_armed_status) {
1236    tty->print_cr("Polling page always armed");
1237  } else {
1238    tty->print_cr("Defer polling page loop count = " INTX_FORMAT "\n",
1239                  DeferPollingPageLoopCount);
1240  }
1241
1242  for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
1243    if (_safepoint_reasons[index] != 0) {
1244      tty->print_cr("%-26s" UINT64_FORMAT_W(10), VM_Operation::name(index),
1245                    _safepoint_reasons[index]);
1246    }
1247  }
1248
1249  tty->print_cr(UINT64_FORMAT_W(5) " VM operations coalesced during safepoint",
1250                _coalesced_vmop_count);
1251  tty->print_cr("Maximum sync time  " INT64_FORMAT_W(5) " ms",
1252                _max_sync_time / MICROUNITS);
1253  tty->print_cr("Maximum vm operation time (except for Exit VM operation)  "
1254                INT64_FORMAT_W(5) " ms",
1255                _max_vmop_time / MICROUNITS);
1256}
1257
1258// ------------------------------------------------------------------------------------------------
1259// Non-product code
1260
1261#ifndef PRODUCT
1262
1263void SafepointSynchronize::print_state() {
1264  if (_state == _not_synchronized) {
1265    tty->print_cr("not synchronized");
1266  } else if (_state == _synchronizing || _state == _synchronized) {
1267    tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
1268                  "synchronized");
1269
1270    for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
1271       cur->safepoint_state()->print();
1272    }
1273  }
1274}
1275
1276void SafepointSynchronize::safepoint_msg(const char* format, ...) {
1277  if (ShowSafepointMsgs) {
1278    va_list ap;
1279    va_start(ap, format);
1280    tty->vprint_cr(format, ap);
1281    va_end(ap);
1282  }
1283}
1284
1285#endif // !PRODUCT
1286