safepoint.cpp revision 9056:dc9930a04ab0
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// --------------------------------------------------------------------------------------------------
// Implementation of Safepoint begin/end
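//
// A VM operation that must run at a safepoint is bracketed by the VM thread
// roughly as follows (simplified sketch of the VMThread loop, not the exact
// code; see vmThread.cpp):
//
//   SafepointSynchronize::begin();   // halt all Java threads at a safepoint
//   op->evaluate();                  // execute the VM_Operation
//   SafepointSynchronize::end();     // let all Java threads resume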

SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
volatile int  SafepointSynchronize::_waiting_to_block = 0;
volatile int SafepointSynchronize::_safepoint_counter = 0;
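// Note: _safepoint_counter is incremented once in begin() and once in end(), so
// it is odd while a safepoint is in progress and even otherwise (see the parity
// asserts in begin() and end() below).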
int SafepointSynchronize::_current_jni_active_count = 0;
long  SafepointSynchronize::_end_of_last_safepoint = 0;
static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
static bool timeout_error_printed = false;

// Roll all threads forward to a safepoint and suspend them all
void SafepointSynchronize::begin() {

  Thread* myThread = Thread::current();
  assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");

  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
    _safepoint_begin_time = os::javaTimeNanos();
    _ts_of_current_safepoint = tty->time_stamp().seconds();
  }

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    // In the future we should investigate whether CMS can use the
    // more-general mechanism below.  DLD (01/05).
    ConcurrentMarkSweepThread::synchronize(false);
  } else if (UseG1GC) {
    SuspendibleThreadSet::synchronize();
  }
#endif // INCLUDE_ALL_GCS

  // By getting the Threads_lock, we assure that no threads are about to start or
  // exit. It is released again in SafepointSynchronize::end().
  Threads_lock->lock();

  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");

  int nof_threads = Threads::number_of_threads();

  if (TraceSafepoint) {
    tty->print_cr("Safepoint synchronization initiated. (%d)", nof_threads);
  }

  RuntimeService::record_safepoint_begin();

  MutexLocker mu(Safepoint_lock);

  // Reset the count of active JNI critical threads
  _current_jni_active_count = 0;

  // Set number of threads to wait for, before we initiate the callbacks
  _waiting_to_block = nof_threads;
  TryingToBlock     = 0 ;
  int still_running = nof_threads;

  // Save the starting time, so that it can be compared to see if this has taken
  // too long to complete.
  jlong safepoint_limit_time;
  timeout_error_printed = false;

  // PrintSafepointStatisticsTimeout can be specified separately. When
  // specified, PrintSafepointStatistics will be set to true in
  // deferred_initialize_stat method. The initialization has to be done
  // early enough to avoid any races. See bug 6880029 for details.
  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
    deferred_initialize_stat();
  }

  // Begin the process of bringing the system to a safepoint.
  // Java threads can be in several different states and are
  // stopped by different mechanisms:
  //
  //  1. Running interpreted
  //     The interpreter dispatch table is changed to force it to
  //     check for a safepoint condition between bytecodes.
  //  2. Running in native code
  //     When returning from the native code, a Java thread must check
  //     the safepoint _state to see if we must block.  If the
  //     VM thread sees a Java thread in native, it does
  //     not wait for this thread to block.  The order of the memory
  //     writes and reads of both the safepoint state and the Java
  //     threads state is critical.  In order to guarantee that the
  //     memory writes are serialized with respect to each other,
  //     the VM thread issues a memory barrier instruction
  //     (on MP systems).  In order to avoid the overhead of issuing
  //     a memory barrier for each Java thread making native calls, each Java
  //     thread performs a write to a single memory page after changing
  //     the thread state.  The VM thread performs a sequence of
  //     mprotect OS calls which forces all previous writes from all
  //     Java threads to be serialized.  This is done in the
  //     os::serialize_thread_states() call.  This has proven to be
  //     much more efficient than executing a membar instruction
  //     on every call to native code.
  //  3. Running compiled Code
  //     Compiled code reads a global (Safepoint Polling) page that
  //     is set to fault if we are trying to get to a safepoint.
  //  4. Blocked
  //     A thread which is blocked will not be allowed to return from the
  //     block condition until the safepoint operation is complete.
  //  5. In VM or Transitioning between states
  //     If a Java thread is currently running in the VM or transitioning
  //     between states, the safepointing code will wait for the thread to
  //     block itself when it attempts transitions to a new state.
  //
  _state            = _synchronizing;
  OrderAccess::fence();

  // Flush all thread states to memory
  if (!UseMembar) {
    os::serialize_thread_states();
  }

  // Make interpreter safepoint aware
  Interpreter::notice_safepoints();

  if (DeferPollingPageLoopCount < 0) {
    // Make polling safepoint aware
    guarantee (PageArmed == 0, "invariant") ;
    PageArmed = 1 ;
    os::make_polling_page_unreadable();
  }
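  // Note: if DeferPollingPageLoopCount is >= 0, arming of the polling page is
  // instead deferred until that many iterations of the spin loop below have
  // elapsed (see the matching make_polling_page_unreadable() call in the loop).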

  // Consider using active_processor_count() ... but that call is expensive.
  int ncpus = os::processor_count() ;

#ifdef ASSERT
  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
    assert(cur->safepoint_state()->is_running(), "Illegal initial state");
    // Clear the visited flag to ensure that the critical counts are collected properly.
    cur->set_visited_for_critical_count(false);
  }
#endif // ASSERT

  if (SafepointTimeout)
    safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;

  // Iterate through all threads until it has been determined how to stop them all at a safepoint
  unsigned int iterations = 0;
  int steps = 0 ;
  while(still_running > 0) {
    for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
      assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
      ThreadSafepointState *cur_state = cur->safepoint_state();
      if (cur_state->is_running()) {
        cur_state->examine_state_of_thread();
        if (!cur_state->is_running()) {
           still_running--;
           // consider adjusting steps downward:
           //   steps = 0
           //   steps -= NNN
           //   steps >>= 1
           //   steps = MIN(steps, 2000-100)
           //   if (iterations != 0) steps -= NNN
        }
        if (TraceSafepoint && Verbose) cur_state->print();
      }
    }

    if (PrintSafepointStatistics && iterations == 0) {
      begin_statistics(nof_threads, still_running);
    }

    if (still_running > 0) {
      // Check whether this is taking too long
      if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
        print_safepoint_timeout(_spinning_timeout);
      }

      // Spin to avoid context switching.
      // There's a tension between allowing the mutators to run (and rendezvous)
      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
      // a mutator might otherwise use profitably to reach a safepoint.  Excessive
      // spinning by the VM thread on a saturated system can increase rendezvous latency.
      // Blocking or yielding incur their own penalties in the form of context switching
      // and the resultant loss of $ residency.
      //
      // Further complicating matters is that yield() does not work as naively expected
      // on many platforms -- yield() does not guarantee that any other ready threads
      // will run.   As such we revert to naked_short_sleep() after some number of iterations.
      // naked_short_sleep() is implemented as a short unconditional sleep.
      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
      // can actually increase the time it takes the VM thread to detect that a system-wide
      // stop-the-world safepoint has been reached.  In a pathological scenario such as that
      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
      // In that case the mutators will be stalled waiting for the safepoint to complete and
      // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
      // will eventually wake up and detect that all mutators are safe, at which point
      // we'll again make progress.
      //
      // Beware too that the VMThread typically runs at elevated priority.
      // Its default priority is higher than the default mutator priority.
      // Obviously, this complicates spinning.
      //
      // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
      // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
      //
      // See the comments in synchronizer.cpp for additional remarks on spinning.
      //
      // In the future we might:
      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
      //    This is tricky as the path used by a thread exiting the JVM (say on
      //    a JNI call-out) simply stores into its state field.  The burden
      //    is placed on the VM thread, which must poll (spin).
      // 2. Find something useful to do while spinning.  If the safepoint is GC-related
      //    we might aggressively scan the stacks of threads that are already safe.
      // 3. Use Solaris schedctl to examine the state of the still-running mutators.
      //    If all the mutators are ONPROC there's no reason to sleep or yield.
      // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
      // 5. Check system saturation.  If the system is not fully saturated then
      //    simply spin and avoid sleep/yield.
      // 6. As still-running mutators rendezvous they could unpark the sleeping
      //    VMthread.  This works well for still-running mutators that become
      //    safe.  The VMthread must still poll for mutators that call-out.
      // 7. Drive the policy on time-since-begin instead of iterations.
      // 8. Consider making the spin duration a function of the # of CPUs:
      //    Spin = (((ncpus-1) * M) + K) + F(still_running)
      //    Alternately, instead of counting iterations of the outer loop
      //    we could count the # of threads visited in the inner loop, above.
      // 9. On windows consider using the return value from SwitchThreadTo()
      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.

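      // Arm the polling page now if arming was deferred via
      // DeferPollingPageLoopCount at the top of begin().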
      if (int(iterations) == DeferPollingPageLoopCount) {
         guarantee (PageArmed == 0, "invariant") ;
         PageArmed = 1 ;
         os::make_polling_page_unreadable();
      }

      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
      // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
      ++steps ;
      if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
        SpinPause() ;     // MP-Polite spin
      } else
      if (steps < DeferThrSuspendLoopCount) {
        os::naked_yield() ;
      } else {
        os::naked_short_sleep(1);
      }

      iterations ++ ;
    }
    assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
  }
  assert(still_running == 0, "sanity check");

  if (PrintSafepointStatistics) {
    update_statistics_on_spin_end();
  }

  // wait until all threads are stopped
  while (_waiting_to_block > 0) {
    if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
    if (!SafepointTimeout || timeout_error_printed) {
      Safepoint_lock->wait(true);  // true, means with no safepoint checks
    } else {
      // Compute remaining time
      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();

      // If there is no remaining time, then there is an error
      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
        print_safepoint_timeout(_blocking_timeout);
      }
    }
  }
  assert(_waiting_to_block == 0, "sanity check");

#ifndef PRODUCT
  if (SafepointTimeout) {
    jlong current_time = os::javaTimeNanos();
    if (safepoint_limit_time < current_time) {
      tty->print_cr("# SafepointSynchronize: Finished after "
                    INT64_FORMAT_W(6) " ms",
                    ((current_time - safepoint_limit_time) / MICROUNITS +
                     SafepointTimeoutDelay));
    }
  }
#endif

  assert((_safepoint_counter & 0x1) == 0, "must be even");
  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
  _safepoint_counter ++;

  // Record state
  _state = _synchronized;

  OrderAccess::fence();

#ifdef ASSERT
  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
    // make sure all the threads were visited
    assert(cur->was_visited_for_critical_count(), "missed a thread");
  }
#endif // ASSERT

  // Update the count of active JNI critical regions
  GC_locker::set_jni_lock_count(_current_jni_active_count);

  if (TraceSafepoint) {
    VM_Operation *op = VMThread::vm_operation();
    tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
  }

  RuntimeService::record_safepoint_synchronized();
  if (PrintSafepointStatistics) {
    update_statistics_on_sync_end(os::javaTimeNanos());
  }

  // Call stuff that needs to be run when a safepoint is just about to be completed
  do_cleanup_tasks();

  if (PrintSafepointStatistics) {
    // Record how much time was spent on the above cleanup tasks
    update_statistics_on_cleanup_end(os::javaTimeNanos());
  }
}

// Wake up all threads, so they are ready to resume execution after the safepoint
// operation has been carried out
void SafepointSynchronize::end() {

  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
  assert((_safepoint_counter & 0x1) == 1, "must be odd");
  _safepoint_counter ++;
  // memory fence isn't required here since an odd _safepoint_counter
  // value can do no harm and a fence is issued below anyway.

  DEBUG_ONLY(Thread* myThread = Thread::current();)
  assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");

  if (PrintSafepointStatistics) {
    end_statistics(os::javaTimeNanos());
  }

#ifdef ASSERT
  // A pending_exception cannot be installed during a safepoint.  The threads
  // may install an async exception after they come back from a safepoint into
  // pending_exception after they unblock.  But that should happen later.
  for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
    assert (!(cur->has_pending_exception() &&
              cur->safepoint_state()->is_at_poll_safepoint()),
            "safepoint installed a pending exception");
  }
#endif // ASSERT

  if (PageArmed) {
    // Make polling safepoint aware
    os::make_polling_page_readable();
    PageArmed = 0 ;
  }

  // Remove safepoint check from interpreter
  Interpreter::ignore_safepoints();

  {
    MutexLocker mu(Safepoint_lock);

    assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");

    // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
    // when they get restarted.
    _state = _not_synchronized;
    OrderAccess::fence();

    if (TraceSafepoint) {
       tty->print_cr("Leaving safepoint region");
    }

    // Start suspended threads
    for(JavaThread *current = Threads::first(); current; current = current->next()) {
      // A problem occurring on Solaris is when attempting to restart threads
      // the first #cpus - 1 go well, but then the VMThread is preempted when we get
      // to the next one (since it has been running the longest).  We then have
      // to wait for a cpu to become available before we can continue restarting
      // threads.
      // FIXME: This causes the performance of the VM to degrade when active and with
      // large numbers of threads.  Apparently this is due to the synchronous nature
      // of suspending threads.
      //
      // TODO-FIXME: the comments above are vestigial and no longer apply.
      // Furthermore, using solaris' schedctl in this particular context confers no benefit
      if (VMThreadHintNoPreempt) {
        os::hint_no_preempt();
      }
      ThreadSafepointState* cur_state = current->safepoint_state();
      assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
      cur_state->restart();
      assert(cur_state->is_running(), "safepoint state has not been reset");
    }

    RuntimeService::record_safepoint_end();

    // Release the Threads_lock, so threads can be created/destroyed again. It will also start all
    // threads blocked in signal_thread_blocked.
    Threads_lock->unlock();

  }
#if INCLUDE_ALL_GCS
  // If there are any concurrent GC threads resume them.
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::desynchronize(false);
  } else if (UseG1GC) {
    SuspendibleThreadSet::desynchronize();
  }
#endif // INCLUDE_ALL_GCS
  // record this time so VMThread can keep track how much time has elapsed
  // since last safepoint.
  _end_of_last_safepoint = os::javaTimeMillis();
}
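
// Note: the VM thread's periodic safepoint logic consults is_cleanup_needed()
// (e.g. when GuaranteedSafepointInterval elapses) to decide whether forcing a
// cleanup safepoint is worthwhile.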

bool SafepointSynchronize::is_cleanup_needed() {
  // Need a safepoint if the inline cache buffer is non-empty
  if (!InlineCacheBuffer::is_empty()) return true;
  return false;
}




// Various cleaning tasks that should be done periodically at safepoints
void SafepointSynchronize::do_cleanup_tasks() {
  {
    TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);
    ObjectSynchronizer::deflate_idle_monitors();
  }

  {
    TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
    InlineCacheBuffer::update_inline_caches();
  }
  {
    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
    CompilationPolicy::policy()->do_safepoint_work();
  }

  {
    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
    NMethodSweeper::mark_active_nmethods();
  }

  if (SymbolTable::needs_rehashing()) {
    TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);
    SymbolTable::rehash_table();
  }

  if (StringTable::needs_rehashing()) {
    TraceTime t6("rehashing string table", TraceSafepointCleanupTime);
    StringTable::rehash_table();
  }

  // rotate log files?
  if (UseGCLogFileRotation) {
    gclog_or_tty->rotate_log(false);
  }

  {
    // CMS delays purging the CLDG until the beginning of the next safepoint in order to
    // make sure the concurrent sweep is done.
    TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
    ClassLoaderDataGraph::purge_if_needed();
  }
}


bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
  switch(state) {
  case _thread_in_native:
    // native threads are safe if they have no java stack or have a walkable stack
    return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();

   // blocked threads should already have a walkable stack
  case _thread_blocked:
    assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
    return true;

  default:
    return false;
  }
}


// See if the thread is running inside a lazy critical native and
// update the thread critical count if so.  Also set a suspend flag to
// cause the native wrapper to return into the JVM to do the unlock
// once the native finishes.
void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
  if (state == _thread_in_native &&
      thread->has_last_Java_frame() &&
      thread->frame_anchor()->walkable()) {
    // This thread might be in a critical native nmethod so look at
    // the top of the stack and increment the critical count if it
    // is.
    frame wrapper_frame = thread->last_frame();
    CodeBlob* stub_cb = wrapper_frame.cb();
    if (stub_cb != NULL &&
        stub_cb->is_nmethod() &&
        stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) {
      // A thread could potentially be in a critical native across
      // more than one safepoint, so only update the critical state on
      // the first one.  When it returns it will perform the unlock.
      if (!thread->do_critical_native_unlock()) {
#ifdef ASSERT
        if (!thread->in_critical()) {
          GC_locker::increment_debug_jni_lock_count();
        }
#endif
        thread->enter_critical();
        // Make sure the native wrapper calls back on return to
        // perform the needed critical unlock.
        thread->set_critical_native_unlock();
      }
    }
  }
}



// -------------------------------------------------------------------------------------------------------
// Implementation of Safepoint callback point

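// Java threads reach block() from the thread-state transition machinery.
// Roughly (a simplified sketch, not the exact code; see the transition helpers
// in interfaceSupport.hpp):
//
//   thread->set_thread_state(_thread_in_native_trans);
//   // ... memory serialization ...
//   if (SafepointSynchronize::do_call_back()) {
//     SafepointSynchronize::block(thread);   // parks here until end()
//   }
//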
void SafepointSynchronize::block(JavaThread *thread) {
  assert(thread != NULL, "thread must be set");
  assert(thread->is_Java_thread(), "not a Java thread");

  // Threads shouldn't block if they are in the middle of printing, but...
  ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());

  // Only bail from the block() call if the thread is gone from the
  // thread list; starting to exit should still block.
  if (thread->is_terminated()) {
     // block current thread if we come here from native code when VM is gone
     thread->block_if_vm_exited();

     // otherwise do nothing
     return;
  }

  JavaThreadState state = thread->thread_state();
  thread->frame_anchor()->make_walkable(thread);

  // Check that we have a valid thread_state at this point
  switch(state) {
    case _thread_in_vm_trans:
    case _thread_in_Java:        // From compiled code

      // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
      // we pretend we are still in the VM.
      thread->set_thread_state(_thread_in_vm);

      if (is_synchronizing()) {
         Atomic::inc (&TryingToBlock) ;
      }

      // We will always be holding the Safepoint_lock when we examine the state
      // of a thread. Hence, the instructions between the Safepoint_lock->lock() and
      // Safepoint_lock->unlock() happen atomically with respect to the safepoint code.
      Safepoint_lock->lock_without_safepoint_check();
      if (is_synchronizing()) {
        // Decrement the number of threads to wait for and signal vm thread
        assert(_waiting_to_block > 0, "sanity check");
        _waiting_to_block--;
        thread->safepoint_state()->set_has_called_back(true);

        DEBUG_ONLY(thread->set_visited_for_critical_count(true));
        if (thread->in_critical()) {
          // Notice that this thread is in a critical section
          increment_jni_active_count();
        }

        // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
        if (_waiting_to_block == 0) {
          Safepoint_lock->notify_all();
        }
      }

      // We transition the thread to state _thread_blocked here, but
      // we can't do our usual check for external suspension and then
      // self-suspend after the lock_without_safepoint_check() call
      // below because we are often called during transitions while
      // we hold different locks. That would leave us suspended while
      // holding a resource which results in deadlocks.
      thread->set_thread_state(_thread_blocked);
      Safepoint_lock->unlock();

      // We now try to acquire the threads lock. Since this lock is held by the VM thread during
      // the entire safepoint, the threads will all line up here during the safepoint.
      Threads_lock->lock_without_safepoint_check();
      // restore original state. This is important if the thread comes from compiled code, so it
      // will continue to execute with the _thread_in_Java state.
      thread->set_thread_state(state);
      Threads_lock->unlock();
      break;

    case _thread_in_native_trans:
    case _thread_blocked_trans:
    case _thread_new_trans:
      if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back) {
        thread->print_thread_state();
        fatal("Deadlock in safepoint code.  "
              "Should have called back to the VM before blocking.");
      }

      // We transition the thread to state _thread_blocked here, but
      // we can't do our usual check for external suspension and then
      // self-suspend after the lock_without_safepoint_check() call
      // below because we are often called during transitions while
      // we hold different locks. That would leave us suspended while
      // holding a resource which results in deadlocks.
      thread->set_thread_state(_thread_blocked);

      // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans. Hence,
      // the safepoint code might still be waiting for it to block. We need to change the state here,
      // so it can see that it is at a safepoint.

      // Block until the safepoint operation is completed.
      Threads_lock->lock_without_safepoint_check();

      // Restore state
      thread->set_thread_state(state);

      Threads_lock->unlock();
      break;

    default:
     fatal("Illegal threadstate encountered: %d", state);
  }

  // Check for pending. async. exceptions or suspends - except if the
  // thread was blocked inside the VM. has_special_runtime_exit_condition()
  // is called last since it grabs a lock and we only want to do that when
  // we must.
  //
  // Note: we never deliver an async exception at a polling point as the
  // compiler may not have an exception handler for it. The polling
  // code will notice the async and deoptimize and the exception will
  // be delivered. (Polling at a return point is ok though). Sure is
  // a lot of bother for a deprecated feature...
  //
  // We don't deliver an async exception if the thread state is
  // _thread_in_native_trans so JNI functions won't be called with
  // a surprising pending exception. If the thread state is going back to java,
  // async exception is checked in check_special_condition_for_native_trans().

  if (state != _thread_blocked_trans &&
      state != _thread_in_vm_trans &&
      thread->has_special_runtime_exit_condition()) {
    thread->handle_special_runtime_exit_condition(
      !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
  }
}

// ------------------------------------------------------------------------------------------------------
// Exception handlers


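// Called when a Java thread running compiled code has faulted on the (armed)
// safepoint polling page; the per-thread ThreadSafepointState does the actual
// blocking (see ThreadSafepointState::handle_polling_page_exception below).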
void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
  assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
  assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
  assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");

  if (ShowSafepointMsgs) {
    tty->print("handle_polling_page_exception: ");
  }

  if (PrintSafepointStatistics) {
    inc_page_trap_count();
  }

  ThreadSafepointState* state = thread->safepoint_state();

  state->handle_polling_page_exception();
}


void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
  if (!timeout_error_printed) {
    timeout_error_printed = true;
    // Print out the thread info which didn't reach the safepoint for debugging
    // purposes (useful when there are lots of threads in the debugger).
    tty->cr();
    tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
    if (reason ==  _spinning_timeout) {
      tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
    } else if (reason == _blocking_timeout) {
      tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop.");
    }

    tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
    ThreadSafepointState *cur_state;
    ResourceMark rm;
    for(JavaThread *cur_thread = Threads::first(); cur_thread;
        cur_thread = cur_thread->next()) {
      cur_state = cur_thread->safepoint_state();

      if (cur_thread->thread_state() != _thread_blocked &&
          ((reason == _spinning_timeout && cur_state->is_running()) ||
           (reason == _blocking_timeout && !cur_state->has_called_back()))) {
        tty->print("# ");
        cur_thread->print();
        tty->cr();
      }
    }
    tty->print_cr("# SafepointSynchronize::begin: (End of list)");
  }

  // To debug a long safepoint, specify both DieOnSafepointTimeout &
  // ShowMessageBoxOnError.
  if (DieOnSafepointTimeout) {
    VM_Operation *op = VMThread::vm_operation();
    fatal("Safepoint sync time longer than " INTX_FORMAT "ms detected when executing %s.",
          SafepointTimeoutDelay,
          op != NULL ? op->name() : "no vm operation");
  }
}


// -------------------------------------------------------------------------------------------------------
// Implementation of ThreadSafepointState

ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
  _thread = thread;
  _type   = _running;
  _has_called_back = false;
  _at_poll_safepoint = false;
}

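// create()/destroy() install and tear down the per-thread safepoint state;
// they are expected to be called when a JavaThread is added to or removed
// from the threads list.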
void ThreadSafepointState::create(JavaThread *thread) {
  ThreadSafepointState *state = new ThreadSafepointState(thread);
  thread->set_safepoint_state(state);
}

void ThreadSafepointState::destroy(JavaThread *thread) {
  if (thread->safepoint_state()) {
    delete(thread->safepoint_state());
    thread->set_safepoint_state(NULL);
  }
}

void ThreadSafepointState::examine_state_of_thread() {
  assert(is_running(), "better be running or just have hit safepoint poll");

  JavaThreadState state = _thread->thread_state();

  // Save the state at the start of safepoint processing.
  _orig_thread_state = state;

  // Check for a thread that is suspended. Note that thread resume tries
  // to grab the Threads_lock which we own here, so a thread cannot be
  // resumed during safepoint synchronization.

  // We check to see if this thread is suspended without locking to
  // avoid deadlocking with a third thread that is waiting for this
  // thread to be suspended. The third thread can notice the safepoint
  // that we're trying to start at the beginning of its SR_lock->wait()
  // call. If that happens, then the third thread will block on the
  // safepoint while still holding the underlying SR_lock. We won't be
  // able to get the SR_lock and we'll deadlock.
  //
  // We don't need to grab the SR_lock here for two reasons:
  // 1) The suspend flags are both volatile and are set with an
  //    Atomic::cmpxchg() call so we should see the suspended
  //    state right away.
  // 2) We're being called from the safepoint polling loop; if
  //    we don't see the suspended state on this iteration, then
  //    we'll come around again.
  //
  bool is_suspended = _thread->is_ext_suspended();
  if (is_suspended) {
    roll_forward(_at_safepoint);
    return;
  }

  // Some JavaThread states have an initial safepoint state of
  // running, but are actually at a safepoint. We will happily
  // agree and update the safepoint state here.
  if (SafepointSynchronize::safepoint_safe(_thread, state)) {
    SafepointSynchronize::check_for_lazy_critical_native(_thread, state);
    roll_forward(_at_safepoint);
    return;
  }

  if (state == _thread_in_vm) {
    roll_forward(_call_back);
    return;
  }

  // All other thread states will continue to run until they
  // transition and self-block in state _blocked
  // Safepoint polling in compiled code causes the Java threads to do the same.
  // Note: new threads may require a malloc so they must be allowed to finish

  assert(is_running(), "examine_state_of_thread on non-running thread");
  return;
}

// Roll the thread forward to the given safepoint state (_at_safepoint or
// _call_back) at its present position.
void ThreadSafepointState::roll_forward(suspend_type type) {
  _type = type;

  switch(_type) {
    case _at_safepoint:
      SafepointSynchronize::signal_thread_at_safepoint();
      DEBUG_ONLY(_thread->set_visited_for_critical_count(true));
      if (_thread->in_critical()) {
        // Notice that this thread is in a critical section
        SafepointSynchronize::increment_jni_active_count();
      }
      break;

    case _call_back:
      set_has_called_back(false);
      break;

    case _running:
    default:
      ShouldNotReachHere();
  }
}

void ThreadSafepointState::restart() {
  switch(type()) {
    case _at_safepoint:
    case _call_back:
      break;

    case _running:
    default:
       tty->print_cr("restart thread " INTPTR_FORMAT " with state %d",
                      _thread, _type);
       _thread->print();
      ShouldNotReachHere();
  }
  _type = _running;
  set_has_called_back(false);
}


void ThreadSafepointState::print_on(outputStream *st) const {
  const char *s;

  switch(_type) {
    case _running                : s = "_running";              break;
    case _at_safepoint           : s = "_at_safepoint";         break;
    case _call_back              : s = "_call_back";            break;
    default:
      ShouldNotReachHere();
  }

  st->print_cr("Thread: " INTPTR_FORMAT
              "  [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
               _thread, _thread->osthread()->thread_id(), s, _has_called_back,
               _at_poll_safepoint);

  _thread->print_thread_state_on(st);
}


// ---------------------------------------------------------------------------------------------------------------------

// Block the thread at the safepoint poll or poll return.
void ThreadSafepointState::handle_polling_page_exception() {

  // Check state.  block() will set thread state to thread_in_vm which will
  // cause the safepoint state _type to become _call_back.
  assert(type() == ThreadSafepointState::_running,
         "polling page exception on thread not running state");

  // Step 1: Find the nmethod from the return address
  if (ShowSafepointMsgs && Verbose) {
    tty->print_cr("Polling page exception at " INTPTR_FORMAT, thread()->saved_exception_pc());
  }
  address real_return_addr = thread()->saved_exception_pc();

  CodeBlob *cb = CodeCache::find_blob(real_return_addr);
  assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
  nmethod* nm = (nmethod*)cb;

  // Find frame of caller
  frame stub_fr = thread()->last_frame();
  CodeBlob* stub_cb = stub_fr.cb();
  assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
  RegisterMap map(thread(), true);
  frame caller_fr = stub_fr.sender(&map);

  // Should only be poll_return or poll
  assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" );

  // This is a poll immediately before a return. The exception handling code
  // has already had the effect of causing the return to occur, so the execution
  // will continue immediately after the call. In addition, the oopmap at the
  // return point does not mark the return value as an oop (if it is), so
  // it needs a handle here to be updated.
  if( nm->is_at_poll_return(real_return_addr) ) {
    // See if return type is an oop.
    bool return_oop = nm->method()->is_returning_oop();
    Handle return_value;
    if (return_oop) {
      // The oop result has been saved on the stack together with all
      // the other registers. In order to preserve it over GCs we need
      // to keep it in a handle.
      oop result = caller_fr.saved_oop_result(&map);
      assert(result == NULL || result->is_oop(), "must be oop");
      return_value = Handle(thread(), result);
      assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    }

    // Block the thread
    SafepointSynchronize::block(thread());

    // restore oop result, if any
    if (return_oop) {
      caller_fr.set_saved_oop_result(&map, return_value());
    }
  }

  // This is a safepoint poll. Verify the return address and block.
  else {
    set_at_poll_safepoint(true);

    // verify the blob built the "return address" correctly
    assert(real_return_addr == caller_fr.pc(), "must match");

    // Block the thread
    SafepointSynchronize::block(thread());
    set_at_poll_safepoint(false);

    // If we have a pending async exception deoptimize the frame
    // as otherwise we may never deliver it.
    if (thread()->has_async_condition()) {
      ThreadInVMfromJavaNoAsyncException __tiv(thread());
      Deoptimization::deoptimize_frame(thread(), caller_fr.id());
    }

    // If an exception has been installed we must check for a pending deoptimization
    // Deoptimize frame if exception has been thrown.

    if (thread()->has_pending_exception() ) {
      RegisterMap map(thread(), true);
      frame caller_fr = stub_fr.sender(&map);
      if (caller_fr.is_deoptimized_frame()) {
        // The exception patch will destroy registers that are still
        // live and will be needed during deoptimization. The async
        // exception should have been deferred until the next safepoint
        // (which will be detected when we get into the interpreter),
        // so if we have an exception now things are messed up.

        fatal("Exception installed and deoptimization is pending");
      }
    }
  }
}


//
//                     Statistics & Instrumentations
//
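// The statistics below are driven by the PrintSafepointStatistics,
// PrintSafepointStatisticsCount and PrintSafepointStatisticsTimeout flags used
// throughout this file, e.g. (illustrative command line, not the only option):
//
//   java -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=10 ...
//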
SafepointSynchronize::SafepointStats*  SafepointSynchronize::_safepoint_stats = NULL;
jlong  SafepointSynchronize::_safepoint_begin_time = 0;
int    SafepointSynchronize::_cur_stat_index = 0;
julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
julong SafepointSynchronize::_coalesced_vmop_count = 0;
jlong  SafepointSynchronize::_max_sync_time = 0;
jlong  SafepointSynchronize::_max_vmop_time = 0;
float  SafepointSynchronize::_ts_of_current_safepoint = 0.0f;

static jlong  cleanup_end_time = 0;
static bool   need_to_track_page_armed_status = false;
static bool   init_done = false;

// Helper method to print the header.
static void print_header() {
  tty->print("         vmop                    "
             "[threads: total initially_running wait_to_block]    ");
  tty->print("[time: spin block sync cleanup vmop] ");

  // no page armed status printed out if it is always armed.
  if (need_to_track_page_armed_status) {
    tty->print("page_armed ");
  }

  tty->print_cr("page_trap_count");
}

void SafepointSynchronize::deferred_initialize_stat() {
  if (init_done) return;

  if (PrintSafepointStatisticsCount <= 0) {
    fatal("Wrong PrintSafepointStatisticsCount");
  }

  // If PrintSafepointStatisticsTimeout is specified, the statistics data will
  // be printed right away, in which case, _safepoint_stats will regress to
  // a single element array. Otherwise, it is a circular ring buffer with default
  // size of PrintSafepointStatisticsCount.
  int stats_array_size;
  if (PrintSafepointStatisticsTimeout > 0) {
    stats_array_size = 1;
    PrintSafepointStatistics = true;
  } else {
    stats_array_size = PrintSafepointStatisticsCount;
  }
  _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
                                                 * sizeof(SafepointStats), mtInternal);
  guarantee(_safepoint_stats != NULL,
            "not enough memory for safepoint instrumentation data");

  if (DeferPollingPageLoopCount >= 0) {
    need_to_track_page_armed_status = true;
  }
  init_done = true;
}

void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
  assert(init_done, "safepoint statistics array hasn't been initialized");
  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  spstat->_time_stamp = _ts_of_current_safepoint;

  VM_Operation *op = VMThread::vm_operation();
  spstat->_vmop_type = (op != NULL ? op->type() : -1);
  if (op != NULL) {
    _safepoint_reasons[spstat->_vmop_type]++;
  }

  spstat->_nof_total_threads = nof_threads;
  spstat->_nof_initial_running_threads = nof_running;
  spstat->_nof_threads_hit_page_trap = 0;

  // Records the start time of spinning. The real time spent on spinning
  // will be adjusted when spin is done. Same trick is applied for time
  // spent on waiting for threads to block.
  if (nof_running != 0) {
    spstat->_time_to_spin = os::javaTimeNanos();
  }  else {
    spstat->_time_to_spin = 0;
  }
}

void SafepointSynchronize::update_statistics_on_spin_end() {
  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  jlong cur_time = os::javaTimeNanos();

  spstat->_nof_threads_wait_to_block = _waiting_to_block;
  if (spstat->_nof_initial_running_threads != 0) {
    spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
  }

  if (need_to_track_page_armed_status) {
    spstat->_page_armed = (PageArmed == 1);
  }

  // Records the start time of waiting for threads to block. Updated when block is done.
  if (_waiting_to_block != 0) {
    spstat->_time_to_wait_to_block = cur_time;
  } else {
    spstat->_time_to_wait_to_block = 0;
  }
}

void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) {
  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  if (spstat->_nof_threads_wait_to_block != 0) {
    spstat->_time_to_wait_to_block = end_time -
      spstat->_time_to_wait_to_block;
  }

  // Records the end time of sync which will be used to calculate the total
  // vm operation time. Again, the real time spent on syncing will be deducted
  // from the start of the sync time later when end_statistics is called.
  spstat->_time_to_sync = end_time - _safepoint_begin_time;
  if (spstat->_time_to_sync > _max_sync_time) {
    _max_sync_time = spstat->_time_to_sync;
  }

  spstat->_time_to_do_cleanups = end_time;
}

void SafepointSynchronize::update_statistics_on_cleanup_end(jlong end_time) {
  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  // Record how long was spent in cleanup tasks.
  spstat->_time_to_do_cleanups = end_time - spstat->_time_to_do_cleanups;

  cleanup_end_time = end_time;
}

void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  // Update the vm operation time.
  spstat->_time_to_exec_vmop = vmop_end_time -  cleanup_end_time;
  if (spstat->_time_to_exec_vmop > _max_vmop_time) {
    _max_vmop_time = spstat->_time_to_exec_vmop;
  }
  // Only the sync time longer than the specified
  // PrintSafepointStatisticsTimeout will be printed out right away.
  // By default, it is -1 meaning all samples will be put into the list.
  if ( PrintSafepointStatisticsTimeout > 0) {
    if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
      print_statistics();
    }
  } else {
    // The safepoint statistics will be printed out when the _safepoint_stats
    // array fills up.
    if (_cur_stat_index == PrintSafepointStatisticsCount - 1) {
      print_statistics();
      _cur_stat_index = 0;
    } else {
      _cur_stat_index++;
    }
  }
}

void SafepointSynchronize::print_statistics() {
  SafepointStats* sstats = _safepoint_stats;

  for (int index = 0; index <= _cur_stat_index; index++) {
    if (index % 30 == 0) {
      print_header();
    }
    sstats = &_safepoint_stats[index];
    tty->print("%.3f: ", sstats->_time_stamp);
    tty->print("%-26s       ["
               INT32_FORMAT_W(8) INT32_FORMAT_W(11) INT32_FORMAT_W(15)
               "    ]    ",
               sstats->_vmop_type == -1 ? "no vm operation" :
               VM_Operation::name(sstats->_vmop_type),
               sstats->_nof_total_threads,
               sstats->_nof_initial_running_threads,
               sstats->_nof_threads_wait_to_block);
    // "/ MICROUNITS " is to convert the unit from nanos to millis.
    tty->print("  ["
               INT64_FORMAT_W(6) INT64_FORMAT_W(6)
               INT64_FORMAT_W(6) INT64_FORMAT_W(6)
               INT64_FORMAT_W(6) "    ]  ",
               sstats->_time_to_spin / MICROUNITS,
               sstats->_time_to_wait_to_block / MICROUNITS,
               sstats->_time_to_sync / MICROUNITS,
               sstats->_time_to_do_cleanups / MICROUNITS,
               sstats->_time_to_exec_vmop / MICROUNITS);

    if (need_to_track_page_armed_status) {
      tty->print(INT32_FORMAT "         ", sstats->_page_armed);
    }
    tty->print_cr(INT32_FORMAT "   ", sstats->_nof_threads_hit_page_trap);
  }
}

// This method will be called when VM exits. It will first call
// print_statistics to print out the rest of the sampling.  Then
// it tries to summarize the sampling.
void SafepointSynchronize::print_stat_on_exit() {
  if (_safepoint_stats == NULL) return;

  SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];

  // During VM exit, end_statistics may not get called and in that
  // case, if the sync time is less than PrintSafepointStatisticsTimeout,
  // don't print it out.
  // Approximate the vm op time.
  _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
    os::javaTimeNanos() - cleanup_end_time;

  if ( PrintSafepointStatisticsTimeout < 0 ||
       spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
    print_statistics();
  }
  tty->cr();

  // Print out polling page sampling status.
  if (!need_to_track_page_armed_status) {
    tty->print_cr("Polling page always armed");
  } else {
    tty->print_cr("Defer polling page loop count = %d\n",
                 DeferPollingPageLoopCount);
  }

  for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
    if (_safepoint_reasons[index] != 0) {
      tty->print_cr("%-26s" UINT64_FORMAT_W(10), VM_Operation::name(index),
                    _safepoint_reasons[index]);
    }
  }

  tty->print_cr(UINT64_FORMAT_W(5) " VM operations coalesced during safepoint",
                _coalesced_vmop_count);
  tty->print_cr("Maximum sync time  " INT64_FORMAT_W(5) " ms",
                _max_sync_time / MICROUNITS);
  tty->print_cr("Maximum vm operation time (except for Exit VM operation)  "
                INT64_FORMAT_W(5) " ms",
                _max_vmop_time / MICROUNITS);
}

// ------------------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void SafepointSynchronize::print_state() {
  if (_state == _not_synchronized) {
    tty->print_cr("not synchronized");
  } else if (_state == _synchronizing || _state == _synchronized) {
    tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
                  "synchronized");

    for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
       cur->safepoint_state()->print();
    }
  }
}

void SafepointSynchronize::safepoint_msg(const char* format, ...) {
  if (ShowSafepointMsgs) {
    va_list ap;
    va_start(ap, format);
    tty->vprint_cr(format, ap);
    va_end(ap);
  }
}

#endif // !PRODUCT