1/*
2 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/vmSymbols.hpp"
27#include "memory/resourceArea.hpp"
28#include "oops/markOop.hpp"
29#include "oops/oop.inline.hpp"
30#include "runtime/atomic.hpp"
31#include "runtime/handles.inline.hpp"
32#include "runtime/interfaceSupport.hpp"
33#include "runtime/mutexLocker.hpp"
34#include "runtime/objectMonitor.hpp"
35#include "runtime/objectMonitor.inline.hpp"
36#include "runtime/orderAccess.inline.hpp"
37#include "runtime/osThread.hpp"
38#include "runtime/stubRoutines.hpp"
39#include "runtime/thread.inline.hpp"
40#include "services/threadService.hpp"
41#include "trace/tracing.hpp"
42#include "trace/traceMacros.hpp"
43#include "utilities/dtrace.hpp"
44#include "utilities/macros.hpp"
45#include "utilities/preserveException.hpp"
46
47#ifdef DTRACE_ENABLED
48
49// Only bother with this argument setup if dtrace is available
50// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
51
52
53#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
54  char* bytes = NULL;                                                      \
55  int len = 0;                                                             \
56  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
57  Symbol* klassname = ((oop)obj)->klass()->name();                         \
58  if (klassname != NULL) {                                                 \
59    bytes = (char*)klassname->bytes();                                     \
60    len = klassname->utf8_length();                                        \
61  }
62
63#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
64  {                                                                        \
65    if (DTraceMonitorProbes) {                                             \
66      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
67      HOTSPOT_MONITOR_WAIT(jtid,                                           \
68                           (monitor), bytes, len, (millis));               \
69    }                                                                      \
70  }
71
72#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
73#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
74#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
75#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
76#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL
77
78#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
79  {                                                                        \
80    if (DTraceMonitorProbes) {                                             \
81      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
82      HOTSPOT_MONITOR_##probe(jtid,                                        \
83                              (uintptr_t)(monitor), bytes, len);           \
84    }                                                                      \
85  }
86
87#else //  ndef DTRACE_ENABLED
88
89#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
90#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
91
92#endif // ndef DTRACE_ENABLED
93
94// Tunables ...
95// The knob* variables are effectively final.  Once set they should
96// never be modified hence.  Consider using __read_mostly with GCC.
97
98int ObjectMonitor::Knob_ExitRelease = 0;
99int ObjectMonitor::Knob_Verbose     = 0;
100int ObjectMonitor::Knob_VerifyInUse = 0;
101int ObjectMonitor::Knob_VerifyMatch = 0;
102int ObjectMonitor::Knob_SpinLimit   = 5000;    // derived by an external tool -
103static int Knob_LogSpins            = 0;       // enable jvmstat tally for spins
104static int Knob_HandOff             = 0;
105static int Knob_ReportSettings      = 0;
106
107static int Knob_SpinBase            = 0;       // Floor AKA SpinMin
108static int Knob_SpinBackOff         = 0;       // spin-loop backoff
109static int Knob_CASPenalty          = -1;      // Penalty for failed CAS
110static int Knob_OXPenalty           = -1;      // Penalty for observed _owner change
111static int Knob_SpinSetSucc         = 1;       // spinners set the _succ field
112static int Knob_SpinEarly           = 1;
113static int Knob_SuccEnabled         = 1;       // futile wake throttling
114static int Knob_SuccRestrict        = 0;       // Limit successors + spinners to at-most-one
115static int Knob_MaxSpinners         = -1;      // Should be a function of # CPUs
116static int Knob_Bonus               = 100;     // spin success bonus
117static int Knob_BonusB              = 100;     // spin success bonus
118static int Knob_Penalty             = 200;     // spin failure penalty
119static int Knob_Poverty             = 1000;
120static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
121static int Knob_FixedSpin           = 0;
122static int Knob_OState              = 3;       // Spinner checks thread state of _owner
123static int Knob_UsePause            = 1;
124static int Knob_ExitPolicy          = 0;
125static int Knob_PreSpin             = 10;      // 20-100 likely better
126static int Knob_ResetEvent          = 0;
127static int BackOffMask              = 0;
128
129static int Knob_FastHSSEC           = 0;
130static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
131static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
132static volatile int InitDone        = 0;
133
134// -----------------------------------------------------------------------------
135// Theory of operations -- Monitors lists, thread residency, etc:
136//
137// * A thread acquires ownership of a monitor by successfully
138//   CAS()ing the _owner field from null to non-null.
139//
140// * Invariant: A thread appears on at most one monitor list --
141//   cxq, EntryList or WaitSet -- at any one time.
142//
143// * Contending threads "push" themselves onto the cxq with CAS
144//   and then spin/park.
145//
146// * After a contending thread eventually acquires the lock it must
147//   dequeue itself from either the EntryList or the cxq.
148//
149// * The exiting thread identifies and unparks an "heir presumptive"
150//   tentative successor thread on the EntryList.  Critically, the
151//   exiting thread doesn't unlink the successor thread from the EntryList.
152//   After having been unparked, the wakee will recontend for ownership of
153//   the monitor.   The successor (wakee) will either acquire the lock or
154//   re-park itself.
155//
156//   Succession is provided for by a policy of competitive handoff.
157//   The exiting thread does _not_ grant or pass ownership to the
158//   successor thread.  (This is also referred to as "handoff" succession").
159//   Instead the exiting thread releases ownership and possibly wakes
160//   a successor, so the successor can (re)compete for ownership of the lock.
161//   If the EntryList is empty but the cxq is populated the exiting
162//   thread will drain the cxq into the EntryList.  It does so by
163//   by detaching the cxq (installing null with CAS) and folding
164//   the threads from the cxq into the EntryList.  The EntryList is
165//   doubly linked, while the cxq is singly linked because of the
166//   CAS-based "push" used to enqueue recently arrived threads (RATs).
167//
168// * Concurrency invariants:
169//
170//   -- only the monitor owner may access or mutate the EntryList.
171//      The mutex property of the monitor itself protects the EntryList
172//      from concurrent interference.
173//   -- Only the monitor owner may detach the cxq.
174//
175// * The monitor entry list operations avoid locks, but strictly speaking
176//   they're not lock-free.  Enter is lock-free, exit is not.
177//   For a description of 'Methods and apparatus providing non-blocking access
178//   to a resource,' see U.S. Pat. No. 7844973.
179//
180// * The cxq can have multiple concurrent "pushers" but only one concurrent
181//   detaching thread.  This mechanism is immune from the ABA corruption.
182//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
183//
184// * Taken together, the cxq and the EntryList constitute or form a
185//   single logical queue of threads stalled trying to acquire the lock.
186//   We use two distinct lists to improve the odds of a constant-time
187//   dequeue operation after acquisition (in the ::enter() epilogue) and
188//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
189//   A key desideratum is to minimize queue & monitor metadata manipulation
190//   that occurs while holding the monitor lock -- that is, we want to
191//   minimize monitor lock holds times.  Note that even a small amount of
192//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
193//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
194//   locks and monitor metadata.
195//
196//   Cxq points to the set of Recently Arrived Threads attempting entry.
197//   Because we push threads onto _cxq with CAS, the RATs must take the form of
198//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
199//   the unlocking thread notices that EntryList is null but _cxq is != null.
200//
201//   The EntryList is ordered by the prevailing queue discipline and
202//   can be organized in any convenient fashion, such as a doubly-linked list or
203//   a circular doubly-linked list.  Critically, we want insert and delete operations
204//   to operate in constant-time.  If we need a priority queue then something akin
205//   to Solaris' sleepq would work nicely.  Viz.,
206//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
207//   Queue discipline is enforced at ::exit() time, when the unlocking thread
208//   drains the cxq into the EntryList, and orders or reorders the threads on the
209//   EntryList accordingly.
210//
211//   Barring "lock barging", this mechanism provides fair cyclic ordering,
212//   somewhat similar to an elevator-scan.
213//
214// * The monitor synchronization subsystem avoids the use of native
215//   synchronization primitives except for the narrow platform-specific
216//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
217//   the semantics of park-unpark.  Put another way, this monitor implementation
218//   depends only on atomic operations and park-unpark.  The monitor subsystem
219//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
220//   underlying OS manages the READY<->RUN transitions.
221//
222// * Waiting threads reside on the WaitSet list -- wait() puts
223//   the caller onto the WaitSet.
224//
225// * notify() or notifyAll() simply transfers threads from the WaitSet to
226//   either the EntryList or cxq.  Subsequent exit() operations will
227//   unpark the notifyee.  Unparking a notifee in notify() is inefficient -
228//   it's likely the notifyee would simply impale itself on the lock held
229//   by the notifier.
230//
231// * An interesting alternative is to encode cxq as (List,LockByte) where
232//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
233//   variable, like _recursions, in the scheme.  The threads or Events that form
234//   the list would have to be aligned in 256-byte addresses.  A thread would
235//   try to acquire the lock or enqueue itself with CAS, but exiting threads
236//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
237//   Note that is is *not* word-tearing, but it does presume that full-word
238//   CAS operations are coherent with intermix with STB operations.  That's true
239//   on most common processors.
240//
241// * See also http://blogs.sun.com/dave
242
243
244// -----------------------------------------------------------------------------
245// Enter support
246
247void ObjectMonitor::enter(TRAPS) {
248  // The following code is ordered to check the most common cases first
249  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
250  Thread * const Self = THREAD;
251
252  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
253  if (cur == NULL) {
254    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
255    assert(_recursions == 0, "invariant");
256    assert(_owner == Self, "invariant");
257    return;
258  }
259
260  if (cur == Self) {
261    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
262    _recursions++;
263    return;
264  }
265
266  if (Self->is_lock_owned ((address)cur)) {
267    assert(_recursions == 0, "internal state error");
268    _recursions = 1;
269    // Commute owner from a thread-specific on-stack BasicLockObject address to
270    // a full-fledged "Thread *".
271    _owner = Self;
272    return;
273  }
274
275  // We've encountered genuine contention.
276  assert(Self->_Stalled == 0, "invariant");
277  Self->_Stalled = intptr_t(this);
278
279  // Try one round of spinning *before* enqueueing Self
280  // and before going through the awkward and expensive state
281  // transitions.  The following spin is strictly optional ...
282  // Note that if we acquire the monitor from an initial spin
283  // we forgo posting JVMTI events and firing DTRACE probes.
284  if (Knob_SpinEarly && TrySpin (Self) > 0) {
285    assert(_owner == Self, "invariant");
286    assert(_recursions == 0, "invariant");
287    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
288    Self->_Stalled = 0;
289    return;
290  }
291
292  assert(_owner != Self, "invariant");
293  assert(_succ != Self, "invariant");
294  assert(Self->is_Java_thread(), "invariant");
295  JavaThread * jt = (JavaThread *) Self;
296  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
297  assert(jt->thread_state() != _thread_blocked, "invariant");
298  assert(this->object() != NULL, "invariant");
299  assert(_count >= 0, "invariant");
300
301  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
302  // Ensure the object-monitor relationship remains stable while there's contention.
303  Atomic::inc(&_count);
304
305  EventJavaMonitorEnter event;
306
307  { // Change java thread status to indicate blocked on monitor enter.
308    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
309
310    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
311    if (JvmtiExport::should_post_monitor_contended_enter()) {
312      JvmtiExport::post_monitor_contended_enter(jt, this);
313
314      // The current thread does not yet own the monitor and does not
315      // yet appear on any queues that would get it made the successor.
316      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
317      // handler cannot accidentally consume an unpark() meant for the
318      // ParkEvent associated with this ObjectMonitor.
319    }
320
321    OSThreadContendState osts(Self->osthread());
322    ThreadBlockInVM tbivm(jt);
323
324    Self->set_current_pending_monitor(this);
325
326    // TODO-FIXME: change the following for(;;) loop to straight-line code.
327    for (;;) {
328      jt->set_suspend_equivalent();
329      // cleared by handle_special_suspend_equivalent_condition()
330      // or java_suspend_self()
331
332      EnterI(THREAD);
333
334      if (!ExitSuspendEquivalent(jt)) break;
335
336      // We have acquired the contended monitor, but while we were
337      // waiting another thread suspended us. We don't want to enter
338      // the monitor while suspended because that would surprise the
339      // thread that suspended us.
340      //
341      _recursions = 0;
342      _succ = NULL;
343      exit(false, Self);
344
345      jt->java_suspend_self();
346    }
347    Self->set_current_pending_monitor(NULL);
348
349    // We cleared the pending monitor info since we've just gotten past
350    // the enter-check-for-suspend dance and we now own the monitor free
351    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
352    // destructor can go to a safepoint at the end of this block. If we
353    // do a thread dump during that safepoint, then this thread will show
354    // as having "-locked" the monitor, but the OS and java.lang.Thread
355    // states will still report that the thread is blocked trying to
356    // acquire it.
357  }
358
359  Atomic::dec(&_count);
360  assert(_count >= 0, "invariant");
361  Self->_Stalled = 0;
362
363  // Must either set _recursions = 0 or ASSERT _recursions == 0.
364  assert(_recursions == 0, "invariant");
365  assert(_owner == Self, "invariant");
366  assert(_succ != Self, "invariant");
367  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
368
369  // The thread -- now the owner -- is back in vm mode.
370  // Report the glorious news via TI,DTrace and jvmstat.
371  // The probe effect is non-trivial.  All the reportage occurs
372  // while we hold the monitor, increasing the length of the critical
373  // section.  Amdahl's parallel speedup law comes vividly into play.
374  //
375  // Another option might be to aggregate the events (thread local or
376  // per-monitor aggregation) and defer reporting until a more opportune
377  // time -- such as next time some thread encounters contention but has
378  // yet to acquire the lock.  While spinning that thread could
379  // spinning we could increment JVMStat counters, etc.
380
381  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
382  if (JvmtiExport::should_post_monitor_contended_entered()) {
383    JvmtiExport::post_monitor_contended_entered(jt, this);
384
385    // The current thread already owns the monitor and is not going to
386    // call park() for the remainder of the monitor enter protocol. So
387    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
388    // event handler consumed an unpark() issued by the thread that
389    // just exited the monitor.
390  }
391
392  if (event.should_commit()) {
393    event.set_monitorClass(((oop)this->object())->klass());
394    event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
395    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
396    event.commit();
397  }
398
399  OM_PERFDATA_OP(ContendedLockAttempts, inc());
400}
401
402
403// Caveat: TryLock() is not necessarily serializing if it returns failure.
404// Callers must compensate as needed.
405
406int ObjectMonitor::TryLock(Thread * Self) {
407  void * own = _owner;
408  if (own != NULL) return 0;
409  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
410    // Either guarantee _recursions == 0 or set _recursions = 0.
411    assert(_recursions == 0, "invariant");
412    assert(_owner == Self, "invariant");
413    return 1;
414  }
415  // The lock had been free momentarily, but we lost the race to the lock.
416  // Interference -- the CAS failed.
417  // We can either return -1 or retry.
418  // Retry doesn't make as much sense because the lock was just acquired.
419  return -1;
420}
421
422#define MAX_RECHECK_INTERVAL 1000
423
424void ObjectMonitor::EnterI(TRAPS) {
425  Thread * const Self = THREAD;
426  assert(Self->is_Java_thread(), "invariant");
427  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
428
429  // Try the lock - TATAS
430  if (TryLock (Self) > 0) {
431    assert(_succ != Self, "invariant");
432    assert(_owner == Self, "invariant");
433    assert(_Responsible != Self, "invariant");
434    return;
435  }
436
437  DeferredInitialize();
438
439  // We try one round of spinning *before* enqueueing Self.
440  //
441  // If the _owner is ready but OFFPROC we could use a YieldTo()
442  // operation to donate the remainder of this thread's quantum
443  // to the owner.  This has subtle but beneficial affinity
444  // effects.
445
446  if (TrySpin (Self) > 0) {
447    assert(_owner == Self, "invariant");
448    assert(_succ != Self, "invariant");
449    assert(_Responsible != Self, "invariant");
450    return;
451  }
452
453  // The Spin failed -- Enqueue and park the thread ...
454  assert(_succ != Self, "invariant");
455  assert(_owner != Self, "invariant");
456  assert(_Responsible != Self, "invariant");
457
458  // Enqueue "Self" on ObjectMonitor's _cxq.
459  //
460  // Node acts as a proxy for Self.
461  // As an aside, if were to ever rewrite the synchronization code mostly
462  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
463  // Java objects.  This would avoid awkward lifecycle and liveness issues,
464  // as well as eliminate a subset of ABA issues.
465  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
466
467  ObjectWaiter node(Self);
468  Self->_ParkEvent->reset();
469  node._prev   = (ObjectWaiter *) 0xBAD;
470  node.TState  = ObjectWaiter::TS_CXQ;
471
472  // Push "Self" onto the front of the _cxq.
473  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
474  // Note that spinning tends to reduce the rate at which threads
475  // enqueue and dequeue on EntryList|cxq.
476  ObjectWaiter * nxt;
477  for (;;) {
478    node._next = nxt = _cxq;
479    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
480
481    // Interference - the CAS failed because _cxq changed.  Just retry.
482    // As an optional optimization we retry the lock.
483    if (TryLock (Self) > 0) {
484      assert(_succ != Self, "invariant");
485      assert(_owner == Self, "invariant");
486      assert(_Responsible != Self, "invariant");
487      return;
488    }
489  }
490
491  // Check for cxq|EntryList edge transition to non-null.  This indicates
492  // the onset of contention.  While contention persists exiting threads
493  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
494  // operations revert to the faster 1-0 mode.  This enter operation may interleave
495  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
496  // arrange for one of the contending thread to use a timed park() operations
497  // to detect and recover from the race.  (Stranding is form of progress failure
498  // where the monitor is unlocked but all the contending threads remain parked).
499  // That is, at least one of the contended threads will periodically poll _owner.
500  // One of the contending threads will become the designated "Responsible" thread.
501  // The Responsible thread uses a timed park instead of a normal indefinite park
502  // operation -- it periodically wakes and checks for and recovers from potential
503  // strandings admitted by 1-0 exit operations.   We need at most one Responsible
504  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
505  // be responsible for a monitor.
506  //
507  // Currently, one of the contended threads takes on the added role of "Responsible".
508  // A viable alternative would be to use a dedicated "stranding checker" thread
509  // that periodically iterated over all the threads (or active monitors) and unparked
510  // successors where there was risk of stranding.  This would help eliminate the
511  // timer scalability issues we see on some platforms as we'd only have one thread
512  // -- the checker -- parked on a timer.
513
514  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
515    // Try to assume the role of responsible thread for the monitor.
516    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
517    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
518  }
519
520  // The lock might have been released while this thread was occupied queueing
521  // itself onto _cxq.  To close the race and avoid "stranding" and
522  // progress-liveness failure we must resample-retry _owner before parking.
523  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
524  // In this case the ST-MEMBAR is accomplished with CAS().
525  //
526  // TODO: Defer all thread state transitions until park-time.
527  // Since state transitions are heavy and inefficient we'd like
528  // to defer the state transitions until absolutely necessary,
529  // and in doing so avoid some transitions ...
530
531  TEVENT(Inflated enter - Contention);
532  int nWakeups = 0;
533  int recheckInterval = 1;
534
535  for (;;) {
536
537    if (TryLock(Self) > 0) break;
538    assert(_owner != Self, "invariant");
539
540    if ((SyncFlags & 2) && _Responsible == NULL) {
541      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
542    }
543
544    // park self
545    if (_Responsible == Self || (SyncFlags & 1)) {
546      TEVENT(Inflated enter - park TIMED);
547      Self->_ParkEvent->park((jlong) recheckInterval);
548      // Increase the recheckInterval, but clamp the value.
549      recheckInterval *= 8;
550      if (recheckInterval > MAX_RECHECK_INTERVAL) {
551        recheckInterval = MAX_RECHECK_INTERVAL;
552      }
553    } else {
554      TEVENT(Inflated enter - park UNTIMED);
555      Self->_ParkEvent->park();
556    }
557
558    if (TryLock(Self) > 0) break;
559
560    // The lock is still contested.
561    // Keep a tally of the # of futile wakeups.
562    // Note that the counter is not protected by a lock or updated by atomics.
563    // That is by design - we trade "lossy" counters which are exposed to
564    // races during updates for a lower probe effect.
565    TEVENT(Inflated enter - Futile wakeup);
566    // This PerfData object can be used in parallel with a safepoint.
567    // See the work around in PerfDataManager::destroy().
568    OM_PERFDATA_OP(FutileWakeups, inc());
569    ++nWakeups;
570
571    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
572    // We can defer clearing _succ until after the spin completes
573    // TrySpin() must tolerate being called with _succ == Self.
574    // Try yet another round of adaptive spinning.
575    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
576
577    // We can find that we were unpark()ed and redesignated _succ while
578    // we were spinning.  That's harmless.  If we iterate and call park(),
579    // park() will consume the event and return immediately and we'll
580    // just spin again.  This pattern can repeat, leaving _succ to simply
581    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
582    // Alternately, we can sample fired() here, and if set, forgo spinning
583    // in the next iteration.
584
585    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
586      Self->_ParkEvent->reset();
587      OrderAccess::fence();
588    }
589    if (_succ == Self) _succ = NULL;
590
591    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
592    OrderAccess::fence();
593  }
594
595  // Egress :
596  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
597  // Normally we'll find Self on the EntryList .
598  // From the perspective of the lock owner (this thread), the
599  // EntryList is stable and cxq is prepend-only.
600  // The head of cxq is volatile but the interior is stable.
601  // In addition, Self.TState is stable.
602
603  assert(_owner == Self, "invariant");
604  assert(object() != NULL, "invariant");
605  // I'd like to write:
606  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
607  // but as we're at a safepoint that's not safe.
608
609  UnlinkAfterAcquire(Self, &node);
610  if (_succ == Self) _succ = NULL;
611
612  assert(_succ != Self, "invariant");
613  if (_Responsible == Self) {
614    _Responsible = NULL;
615    OrderAccess::fence(); // Dekker pivot-point
616
617    // We may leave threads on cxq|EntryList without a designated
618    // "Responsible" thread.  This is benign.  When this thread subsequently
619    // exits the monitor it can "see" such preexisting "old" threads --
620    // threads that arrived on the cxq|EntryList before the fence, above --
621    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
622    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
623    // non-null and elect a new "Responsible" timer thread.
624    //
625    // This thread executes:
626    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
627    //    LD cxq|EntryList               (in subsequent exit)
628    //
629    // Entering threads in the slow/contended path execute:
630    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
631    //    The (ST cxq; MEMBAR) is accomplished with CAS().
632    //
633    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
634    // exit operation from floating above the ST Responsible=null.
635  }
636
637  // We've acquired ownership with CAS().
638  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
639  // But since the CAS() this thread may have also stored into _succ,
640  // EntryList, cxq or Responsible.  These meta-data updates must be
641  // visible __before this thread subsequently drops the lock.
642  // Consider what could occur if we didn't enforce this constraint --
643  // STs to monitor meta-data and user-data could reorder with (become
644  // visible after) the ST in exit that drops ownership of the lock.
645  // Some other thread could then acquire the lock, but observe inconsistent
646  // or old monitor meta-data and heap data.  That violates the JMM.
647  // To that end, the 1-0 exit() operation must have at least STST|LDST
648  // "release" barrier semantics.  Specifically, there must be at least a
649  // STST|LDST barrier in exit() before the ST of null into _owner that drops
650  // the lock.   The barrier ensures that changes to monitor meta-data and data
651  // protected by the lock will be visible before we release the lock, and
652  // therefore before some other thread (CPU) has a chance to acquire the lock.
653  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
654  //
655  // Critically, any prior STs to _succ or EntryList must be visible before
656  // the ST of null into _owner in the *subsequent* (following) corresponding
657  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
658  // execute a serializing instruction.
659
660  if (SyncFlags & 8) {
661    OrderAccess::fence();
662  }
663  return;
664}
665
666// ReenterI() is a specialized inline form of the latter half of the
667// contended slow-path from EnterI().  We use ReenterI() only for
668// monitor reentry in wait().
669//
670// In the future we should reconcile EnterI() and ReenterI(), adding
671// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
672// loop accordingly.
673
674void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
675  assert(Self != NULL, "invariant");
676  assert(SelfNode != NULL, "invariant");
677  assert(SelfNode->_thread == Self, "invariant");
678  assert(_waiters > 0, "invariant");
679  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
680  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
681  JavaThread * jt = (JavaThread *) Self;
682
683  int nWakeups = 0;
684  for (;;) {
685    ObjectWaiter::TStates v = SelfNode->TState;
686    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
687    assert(_owner != Self, "invariant");
688
689    if (TryLock(Self) > 0) break;
690    if (TrySpin(Self) > 0) break;
691
692    TEVENT(Wait Reentry - parking);
693
694    // State transition wrappers around park() ...
695    // ReenterI() wisely defers state transitions until
696    // it's clear we must park the thread.
697    {
698      OSThreadContendState osts(Self->osthread());
699      ThreadBlockInVM tbivm(jt);
700
701      // cleared by handle_special_suspend_equivalent_condition()
702      // or java_suspend_self()
703      jt->set_suspend_equivalent();
704      if (SyncFlags & 1) {
705        Self->_ParkEvent->park((jlong)MAX_RECHECK_INTERVAL);
706      } else {
707        Self->_ParkEvent->park();
708      }
709
710      // were we externally suspended while we were waiting?
711      for (;;) {
712        if (!ExitSuspendEquivalent(jt)) break;
713        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
714        jt->java_suspend_self();
715        jt->set_suspend_equivalent();
716      }
717    }
718
719    // Try again, but just so we distinguish between futile wakeups and
720    // successful wakeups.  The following test isn't algorithmically
721    // necessary, but it helps us maintain sensible statistics.
722    if (TryLock(Self) > 0) break;
723
724    // The lock is still contested.
725    // Keep a tally of the # of futile wakeups.
726    // Note that the counter is not protected by a lock or updated by atomics.
727    // That is by design - we trade "lossy" counters which are exposed to
728    // races during updates for a lower probe effect.
729    TEVENT(Wait Reentry - futile wakeup);
730    ++nWakeups;
731
732    // Assuming this is not a spurious wakeup we'll normally
733    // find that _succ == Self.
734    if (_succ == Self) _succ = NULL;
735
736    // Invariant: after clearing _succ a contending thread
737    // *must* retry  _owner before parking.
738    OrderAccess::fence();
739
740    // This PerfData object can be used in parallel with a safepoint.
741    // See the work around in PerfDataManager::destroy().
742    OM_PERFDATA_OP(FutileWakeups, inc());
743  }
744
745  // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
746  // Normally we'll find Self on the EntryList.
747  // Unlinking from the EntryList is constant-time and atomic-free.
748  // From the perspective of the lock owner (this thread), the
749  // EntryList is stable and cxq is prepend-only.
750  // The head of cxq is volatile but the interior is stable.
751  // In addition, Self.TState is stable.
752
753  assert(_owner == Self, "invariant");
754  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
755  UnlinkAfterAcquire(Self, SelfNode);
756  if (_succ == Self) _succ = NULL;
757  assert(_succ != Self, "invariant");
758  SelfNode->TState = ObjectWaiter::TS_RUN;
759  OrderAccess::fence();      // see comments at the end of EnterI()
760}
761
762// By convention we unlink a contending thread from EntryList|cxq immediately
763// after the thread acquires the lock in ::enter().  Equally, we could defer
764// unlinking the thread until ::exit()-time.
765
766void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
767  assert(_owner == Self, "invariant");
768  assert(SelfNode->_thread == Self, "invariant");
769
770  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
771    // Normal case: remove Self from the DLL EntryList .
772    // This is a constant-time operation.
773    ObjectWaiter * nxt = SelfNode->_next;
774    ObjectWaiter * prv = SelfNode->_prev;
775    if (nxt != NULL) nxt->_prev = prv;
776    if (prv != NULL) prv->_next = nxt;
777    if (SelfNode == _EntryList) _EntryList = nxt;
778    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
779    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
780    TEVENT(Unlink from EntryList);
781  } else {
782    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
783    // Inopportune interleaving -- Self is still on the cxq.
784    // This usually means the enqueue of self raced an exiting thread.
785    // Normally we'll find Self near the front of the cxq, so
786    // dequeueing is typically fast.  If needbe we can accelerate
787    // this with some MCS/CHL-like bidirectional list hints and advisory
788    // back-links so dequeueing from the interior will normally operate
789    // in constant-time.
790    // Dequeue Self from either the head (with CAS) or from the interior
791    // with a linear-time scan and normal non-atomic memory operations.
792    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
793    // and then unlink Self from EntryList.  We have to drain eventually,
794    // so it might as well be now.
795
796    ObjectWaiter * v = _cxq;
797    assert(v != NULL, "invariant");
798    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
799      // The CAS above can fail from interference IFF a "RAT" arrived.
800      // In that case Self must be in the interior and can no longer be
801      // at the head of cxq.
802      if (v == SelfNode) {
803        assert(_cxq != v, "invariant");
804        v = _cxq;          // CAS above failed - start scan at head of list
805      }
806      ObjectWaiter * p;
807      ObjectWaiter * q = NULL;
808      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
809        q = p;
810        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
811      }
812      assert(v != SelfNode, "invariant");
813      assert(p == SelfNode, "Node not found on cxq");
814      assert(p != _cxq, "invariant");
815      assert(q != NULL, "invariant");
816      assert(q->_next == p, "invariant");
817      q->_next = p->_next;
818    }
819    TEVENT(Unlink from cxq);
820  }
821
822#ifdef ASSERT
823  // Diagnostic hygiene ...
824  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
825  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
826  SelfNode->TState = ObjectWaiter::TS_RUN;
827#endif
828}
829
830// -----------------------------------------------------------------------------
831// Exit support
832//
833// exit()
834// ~~~~~~
835// Note that the collector can't reclaim the objectMonitor or deflate
836// the object out from underneath the thread calling ::exit() as the
837// thread calling ::exit() never transitions to a stable state.
838// This inhibits GC, which in turn inhibits asynchronous (and
839// inopportune) reclamation of "this".
840//
841// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
842// There's one exception to the claim above, however.  EnterI() can call
843// exit() to drop a lock if the acquirer has been externally suspended.
844// In that case exit() is called with _thread_state as _thread_blocked,
845// but the monitor's _count field is > 0, which inhibits reclamation.
846//
847// 1-0 exit
848// ~~~~~~~~
849// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
850// the fast-path operators have been optimized so the common ::exit()
851// operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
852// The code emitted by fast_unlock() elides the usual MEMBAR.  This
853// greatly improves latency -- MEMBAR and CAS having considerable local
854// latency on modern processors -- but at the cost of "stranding".  Absent the
855// MEMBAR, a thread in fast_unlock() can race a thread in the slow
856// ::enter() path, resulting in the entering thread being stranding
857// and a progress-liveness failure.   Stranding is extremely rare.
858// We use timers (timed park operations) & periodic polling to detect
859// and recover from stranding.  Potentially stranded threads periodically
860// wake up and poll the lock.  See the usage of the _Responsible variable.
861//
862// The CAS() in enter provides for safety and exclusion, while the CAS or
863// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
864// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
865// We detect and recover from stranding with timers.
866//
867// If a thread transiently strands it'll park until (a) another
868// thread acquires the lock and then drops the lock, at which time the
869// exiting thread will notice and unpark the stranded thread, or, (b)
870// the timer expires.  If the lock is high traffic then the stranding latency
871// will be low due to (a).  If the lock is low traffic then the odds of
872// stranding are lower, although the worst-case stranding latency
873// is longer.  Critically, we don't want to put excessive load in the
874// platform's timer subsystem.  We want to minimize both the timer injection
875// rate (timers created/sec) as well as the number of timers active at
876// any one time.  (more precisely, we want to minimize timer-seconds, which is
877// the integral of the # of active timers at any instant over time).
878// Both impinge on OS scalability.  Given that, at most one thread parked on
879// a monitor will use a timer.
880//
881// There is also the risk of a futile wake-up. If we drop the lock
882// another thread can reacquire the lock immediately, and we can
883// then wake a thread unnecessarily. This is benign, and we've
884// structured the code so the windows are short and the frequency
885// of such futile wakups is low.
886
887void ObjectMonitor::exit(bool not_suspended, TRAPS) {
888  Thread * const Self = THREAD;
889  if (THREAD != _owner) {
890    if (THREAD->is_lock_owned((address) _owner)) {
891      // Transmute _owner from a BasicLock pointer to a Thread address.
892      // We don't need to hold _mutex for this transition.
893      // Non-null to Non-null is safe as long as all readers can
894      // tolerate either flavor.
895      assert(_recursions == 0, "invariant");
896      _owner = THREAD;
897      _recursions = 0;
898    } else {
899      // Apparent unbalanced locking ...
900      // Naively we'd like to throw IllegalMonitorStateException.
901      // As a practical matter we can neither allocate nor throw an
902      // exception as ::exit() can be called from leaf routines.
903      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
904      // Upon deeper reflection, however, in a properly run JVM the only
905      // way we should encounter this situation is in the presence of
906      // unbalanced JNI locking. TODO: CheckJNICalls.
907      // See also: CR4414101
908      TEVENT(Exit - Throw IMSX);
909      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
910      return;
911    }
912  }
913
914  if (_recursions != 0) {
915    _recursions--;        // this is simple recursive enter
916    TEVENT(Inflated exit - recursive);
917    return;
918  }
919
920  // Invariant: after setting Responsible=null an thread must execute
921  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
922  if ((SyncFlags & 4) == 0) {
923    _Responsible = NULL;
924  }
925
926#if INCLUDE_TRACE
927  // get the owner's thread id for the MonitorEnter event
928  // if it is enabled and the thread isn't suspended
929  if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
930    _previous_owner_tid = THREAD_TRACE_ID(Self);
931  }
932#endif
933
934  for (;;) {
935    assert(THREAD == _owner, "invariant");
936
937    if (Knob_ExitPolicy == 0) {
938      // release semantics: prior loads and stores from within the critical section
939      // must not float (reorder) past the following store that drops the lock.
940      // On SPARC that requires MEMBAR #loadstore|#storestore.
941      // But of course in TSO #loadstore|#storestore is not required.
942      // I'd like to write one of the following:
943      // A.  OrderAccess::release() ; _owner = NULL
944      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
945      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
946      // store into a _dummy variable.  That store is not needed, but can result
947      // in massive wasteful coherency traffic on classic SMP systems.
948      // Instead, I use release_store(), which is implemented as just a simple
949      // ST on x64, x86 and SPARC.
950      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
951      OrderAccess::storeload();                        // See if we need to wake a successor
952      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
953        TEVENT(Inflated exit - simple egress);
954        return;
955      }
956      TEVENT(Inflated exit - complex egress);
957      // Other threads are blocked trying to acquire the lock.
958
959      // Normally the exiting thread is responsible for ensuring succession,
960      // but if other successors are ready or other entering threads are spinning
961      // then this thread can simply store NULL into _owner and exit without
962      // waking a successor.  The existence of spinners or ready successors
963      // guarantees proper succession (liveness).  Responsibility passes to the
964      // ready or running successors.  The exiting thread delegates the duty.
965      // More precisely, if a successor already exists this thread is absolved
966      // of the responsibility of waking (unparking) one.
967      //
968      // The _succ variable is critical to reducing futile wakeup frequency.
969      // _succ identifies the "heir presumptive" thread that has been made
970      // ready (unparked) but that has not yet run.  We need only one such
971      // successor thread to guarantee progress.
972      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
973      // section 3.3 "Futile Wakeup Throttling" for details.
974      //
975      // Note that spinners in Enter() also set _succ non-null.
976      // In the current implementation spinners opportunistically set
977      // _succ so that exiting threads might avoid waking a successor.
978      // Another less appealing alternative would be for the exiting thread
979      // to drop the lock and then spin briefly to see if a spinner managed
980      // to acquire the lock.  If so, the exiting thread could exit
981      // immediately without waking a successor, otherwise the exiting
982      // thread would need to dequeue and wake a successor.
983      // (Note that we'd need to make the post-drop spin short, but no
984      // shorter than the worst-case round-trip cache-line migration time.
985      // The dropped lock needs to become visible to the spinner, and then
986      // the acquisition of the lock by the spinner must become visible to
987      // the exiting thread).
988
989      // It appears that an heir-presumptive (successor) must be made ready.
990      // Only the current lock owner can manipulate the EntryList or
991      // drain _cxq, so we need to reacquire the lock.  If we fail
992      // to reacquire the lock the responsibility for ensuring succession
993      // falls to the new owner.
994      //
995      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
996        return;
997      }
998      TEVENT(Exit - Reacquired);
999    } else {
1000      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1001        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
1002        OrderAccess::storeload();
1003        // Ratify the previously observed values.
1004        if (_cxq == NULL || _succ != NULL) {
1005          TEVENT(Inflated exit - simple egress);
1006          return;
1007        }
1008
1009        // inopportune interleaving -- the exiting thread (this thread)
1010        // in the fast-exit path raced an entering thread in the slow-enter
1011        // path.
1012        // We have two choices:
1013        // A.  Try to reacquire the lock.
1014        //     If the CAS() fails return immediately, otherwise
1015        //     we either restart/rerun the exit operation, or simply
1016        //     fall-through into the code below which wakes a successor.
1017        // B.  If the elements forming the EntryList|cxq are TSM
1018        //     we could simply unpark() the lead thread and return
1019        //     without having set _succ.
1020        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1021          TEVENT(Inflated exit - reacquired succeeded);
1022          return;
1023        }
1024        TEVENT(Inflated exit - reacquired failed);
1025      } else {
1026        TEVENT(Inflated exit - complex egress);
1027      }
1028    }
1029
1030    guarantee(_owner == THREAD, "invariant");
1031
1032    ObjectWaiter * w = NULL;
1033    int QMode = Knob_QMode;
1034
1035    if (QMode == 2 && _cxq != NULL) {
1036      // QMode == 2 : cxq has precedence over EntryList.
1037      // Try to directly wake a successor from the cxq.
1038      // If successful, the successor will need to unlink itself from cxq.
1039      w = _cxq;
1040      assert(w != NULL, "invariant");
1041      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
1042      ExitEpilog(Self, w);
1043      return;
1044    }
1045
1046    if (QMode == 3 && _cxq != NULL) {
1047      // Aggressively drain cxq into EntryList at the first opportunity.
1048      // This policy ensure that recently-run threads live at the head of EntryList.
1049      // Drain _cxq into EntryList - bulk transfer.
1050      // First, detach _cxq.
1051      // The following loop is tantamount to: w = swap(&cxq, NULL)
1052      w = _cxq;
1053      for (;;) {
1054        assert(w != NULL, "Invariant");
1055        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
1056        if (u == w) break;
1057        w = u;
1058      }
1059      assert(w != NULL, "invariant");
1060
1061      ObjectWaiter * q = NULL;
1062      ObjectWaiter * p;
1063      for (p = w; p != NULL; p = p->_next) {
1064        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
1065        p->TState = ObjectWaiter::TS_ENTER;
1066        p->_prev = q;
1067        q = p;
1068      }
1069
1070      // Append the RATs to the EntryList
1071      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
1072      ObjectWaiter * Tail;
1073      for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL;
1074           Tail = Tail->_next)
1075        /* empty */;
1076      if (Tail == NULL) {
1077        _EntryList = w;
1078      } else {
1079        Tail->_next = w;
1080        w->_prev = Tail;
1081      }
1082
1083      // Fall thru into code that tries to wake a successor from EntryList
1084    }
1085
1086    if (QMode == 4 && _cxq != NULL) {
1087      // Aggressively drain cxq into EntryList at the first opportunity.
1088      // This policy ensure that recently-run threads live at the head of EntryList.
1089
1090      // Drain _cxq into EntryList - bulk transfer.
1091      // First, detach _cxq.
1092      // The following loop is tantamount to: w = swap(&cxq, NULL)
1093      w = _cxq;
1094      for (;;) {
1095        assert(w != NULL, "Invariant");
1096        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
1097        if (u == w) break;
1098        w = u;
1099      }
1100      assert(w != NULL, "invariant");
1101
1102      ObjectWaiter * q = NULL;
1103      ObjectWaiter * p;
1104      for (p = w; p != NULL; p = p->_next) {
1105        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
1106        p->TState = ObjectWaiter::TS_ENTER;
1107        p->_prev = q;
1108        q = p;
1109      }
1110
1111      // Prepend the RATs to the EntryList
1112      if (_EntryList != NULL) {
1113        q->_next = _EntryList;
1114        _EntryList->_prev = q;
1115      }
1116      _EntryList = w;
1117
1118      // Fall thru into code that tries to wake a successor from EntryList
1119    }
1120
1121    w = _EntryList;
1122    if (w != NULL) {
1123      // I'd like to write: guarantee (w->_thread != Self).
1124      // But in practice an exiting thread may find itself on the EntryList.
1125      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
1126      // then calls exit().  Exit release the lock by setting O._owner to NULL.
1127      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
1128      // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
1129      // release the lock "O".  T2 resumes immediately after the ST of null into
1130      // _owner, above.  T2 notices that the EntryList is populated, so it
1131      // reacquires the lock and then finds itself on the EntryList.
1132      // Given all that, we have to tolerate the circumstance where "w" is
1133      // associated with Self.
1134      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1135      ExitEpilog(Self, w);
1136      return;
1137    }
1138
1139    // If we find that both _cxq and EntryList are null then just
1140    // re-run the exit protocol from the top.
1141    w = _cxq;
1142    if (w == NULL) continue;
1143
1144    // Drain _cxq into EntryList - bulk transfer.
1145    // First, detach _cxq.
1146    // The following loop is tantamount to: w = swap(&cxq, NULL)
1147    for (;;) {
1148      assert(w != NULL, "Invariant");
1149      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
1150      if (u == w) break;
1151      w = u;
1152    }
1153    TEVENT(Inflated exit - drain cxq into EntryList);
1154
1155    assert(w != NULL, "invariant");
1156    assert(_EntryList == NULL, "invariant");
1157
1158    // Convert the LIFO SLL anchored by _cxq into a DLL.
1159    // The list reorganization step operates in O(LENGTH(w)) time.
1160    // It's critical that this step operate quickly as
1161    // "Self" still holds the outer-lock, restricting parallelism
1162    // and effectively lengthening the critical section.
1163    // Invariant: s chases t chases u.
1164    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1165    // we have faster access to the tail.
1166
1167    if (QMode == 1) {
1168      // QMode == 1 : drain cxq to EntryList, reversing order
1169      // We also reverse the order of the list.
1170      ObjectWaiter * s = NULL;
1171      ObjectWaiter * t = w;
1172      ObjectWaiter * u = NULL;
1173      while (t != NULL) {
1174        guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
1175        t->TState = ObjectWaiter::TS_ENTER;
1176        u = t->_next;
1177        t->_prev = u;
1178        t->_next = s;
1179        s = t;
1180        t = u;
1181      }
1182      _EntryList  = s;
1183      assert(s != NULL, "invariant");
1184    } else {
1185      // QMode == 0 or QMode == 2
1186      _EntryList = w;
1187      ObjectWaiter * q = NULL;
1188      ObjectWaiter * p;
1189      for (p = w; p != NULL; p = p->_next) {
1190        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
1191        p->TState = ObjectWaiter::TS_ENTER;
1192        p->_prev = q;
1193        q = p;
1194      }
1195    }
1196
1197    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1198    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1199
1200    // See if we can abdicate to a spinner instead of waking a thread.
1201    // A primary goal of the implementation is to reduce the
1202    // context-switch rate.
1203    if (_succ != NULL) continue;
1204
1205    w = _EntryList;
1206    if (w != NULL) {
1207      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
1208      ExitEpilog(Self, w);
1209      return;
1210    }
1211  }
1212}
1213
1214// ExitSuspendEquivalent:
1215// A faster alternate to handle_special_suspend_equivalent_condition()
1216//
1217// handle_special_suspend_equivalent_condition() unconditionally
1218// acquires the SR_lock.  On some platforms uncontended MutexLocker()
1219// operations have high latency.  Note that in ::enter() we call HSSEC
1220// while holding the monitor, so we effectively lengthen the critical sections.
1221//
1222// There are a number of possible solutions:
1223//
1224// A.  To ameliorate the problem we might also defer state transitions
1225//     to as late as possible -- just prior to parking.
1226//     Given that, we'd call HSSEC after having returned from park(),
1227//     but before attempting to acquire the monitor.  This is only a
1228//     partial solution.  It avoids calling HSSEC while holding the
1229//     monitor (good), but it still increases successor reacquisition latency --
1230//     the interval between unparking a successor and the time the successor
1231//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
1232//     If we use this technique we can also avoid EnterI()-exit() loop
1233//     in ::enter() where we iteratively drop the lock and then attempt
1234//     to reacquire it after suspending.
1235//
1236// B.  In the future we might fold all the suspend bits into a
1237//     composite per-thread suspend flag and then update it with CAS().
1238//     Alternately, a Dekker-like mechanism with multiple variables
1239//     would suffice:
1240//       ST Self->_suspend_equivalent = false
1241//       MEMBAR
1242//       LD Self_>_suspend_flags
1243//
1244// UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
1245// with a more efficient implementation, the need to use "FastHSSEC" has
1246// decreased. - Dave
1247
1248
1249bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
1250  const int Mode = Knob_FastHSSEC;
1251  if (Mode && !jSelf->is_external_suspend()) {
1252    assert(jSelf->is_suspend_equivalent(), "invariant");
1253    jSelf->clear_suspend_equivalent();
1254    if (2 == Mode) OrderAccess::storeload();
1255    if (!jSelf->is_external_suspend()) return false;
1256    // We raced a suspension -- fall thru into the slow path
1257    TEVENT(ExitSuspendEquivalent - raced);
1258    jSelf->set_suspend_equivalent();
1259  }
1260  return jSelf->handle_special_suspend_equivalent_condition();
1261}
1262
1263
1264void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1265  assert(_owner == Self, "invariant");
1266
1267  // Exit protocol:
1268  // 1. ST _succ = wakee
1269  // 2. membar #loadstore|#storestore;
1270  // 2. ST _owner = NULL
1271  // 3. unpark(wakee)
1272
1273  _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
1274  ParkEvent * Trigger = Wakee->_event;
1275
1276  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1277  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1278  // out-of-scope (non-extant).
1279  Wakee  = NULL;
1280
1281  // Drop the lock
1282  OrderAccess::release_store_ptr(&_owner, NULL);
1283  OrderAccess::fence();                               // ST _owner vs LD in unpark()
1284
1285  if (SafepointSynchronize::do_call_back()) {
1286    TEVENT(unpark before SAFEPOINT);
1287  }
1288
1289  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1290  Trigger->unpark();
1291
1292  // Maintain stats and report events to JVMTI
1293  OM_PERFDATA_OP(Parks, inc());
1294}
1295
1296
1297// -----------------------------------------------------------------------------
1298// Class Loader deadlock handling.
1299//
1300// complete_exit exits a lock returning recursion count
1301// complete_exit/reenter operate as a wait without waiting
1302// complete_exit requires an inflated monitor
1303// The _owner field is not always the Thread addr even with an
1304// inflated monitor, e.g. the monitor can be inflated by a non-owning
1305// thread due to contention.
1306intptr_t ObjectMonitor::complete_exit(TRAPS) {
1307  Thread * const Self = THREAD;
1308  assert(Self->is_Java_thread(), "Must be Java thread!");
1309  JavaThread *jt = (JavaThread *)THREAD;
1310
1311  DeferredInitialize();
1312
1313  if (THREAD != _owner) {
1314    if (THREAD->is_lock_owned ((address)_owner)) {
1315      assert(_recursions == 0, "internal state error");
1316      _owner = THREAD;   // Convert from basiclock addr to Thread addr
1317      _recursions = 0;
1318    }
1319  }
1320
1321  guarantee(Self == _owner, "complete_exit not owner");
1322  intptr_t save = _recursions; // record the old recursion count
1323  _recursions = 0;        // set the recursion level to be 0
1324  exit(true, Self);           // exit the monitor
1325  guarantee(_owner != Self, "invariant");
1326  return save;
1327}
1328
1329// reenter() enters a lock and sets recursion count
1330// complete_exit/reenter operate as a wait without waiting
1331void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1332  Thread * const Self = THREAD;
1333  assert(Self->is_Java_thread(), "Must be Java thread!");
1334  JavaThread *jt = (JavaThread *)THREAD;
1335
1336  guarantee(_owner != Self, "reenter already owner");
1337  enter(THREAD);       // enter the monitor
1338  guarantee(_recursions == 0, "reenter recursion");
1339  _recursions = recursions;
1340  return;
1341}
1342
1343
1344// -----------------------------------------------------------------------------
1345// A macro is used below because there may already be a pending
1346// exception which should not abort the execution of the routines
1347// which use this (which is why we don't put this into check_slow and
1348// call it with a CHECK argument).
1349
1350#define CHECK_OWNER()                                                       \
1351  do {                                                                      \
1352    if (THREAD != _owner) {                                                 \
1353      if (THREAD->is_lock_owned((address) _owner)) {                        \
1354        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
1355        _recursions = 0;                                                    \
1356      } else {                                                              \
1357        TEVENT(Throw IMSX);                                                 \
1358        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
1359      }                                                                     \
1360    }                                                                       \
1361  } while (false)
1362
1363// check_slow() is a misnomer.  It's called to simply to throw an IMSX exception.
1364// TODO-FIXME: remove check_slow() -- it's likely dead.
1365
1366void ObjectMonitor::check_slow(TRAPS) {
1367  TEVENT(check_slow - throw IMSX);
1368  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1369  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1370}
1371
1372static int Adjust(volatile int * adr, int dx) {
1373  int v;
1374  for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr) /* empty */;
1375  return v;
1376}
1377
1378// helper method for posting a monitor wait event
1379void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
1380                                            jlong notifier_tid,
1381                                            jlong timeout,
1382                                            bool timedout) {
1383  assert(event != NULL, "invariant");
1384  event->set_monitorClass(((oop)this->object())->klass());
1385  event->set_timeout(timeout);
1386  event->set_address((TYPE_ADDRESS)this->object_addr());
1387  event->set_notifier(notifier_tid);
1388  event->set_timedOut(timedout);
1389  event->commit();
1390}
1391
1392// -----------------------------------------------------------------------------
1393// Wait/Notify/NotifyAll
1394//
1395// Note: a subset of changes to ObjectMonitor::wait()
1396// will need to be replicated in complete_exit
1397void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1398  Thread * const Self = THREAD;
1399  assert(Self->is_Java_thread(), "Must be Java thread!");
1400  JavaThread *jt = (JavaThread *)THREAD;
1401
1402  DeferredInitialize();
1403
1404  // Throw IMSX or IEX.
1405  CHECK_OWNER();
1406
1407  EventJavaMonitorWait event;
1408
1409  // check for a pending interrupt
1410  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1411    // post monitor waited event.  Note that this is past-tense, we are done waiting.
1412    if (JvmtiExport::should_post_monitor_waited()) {
1413      // Note: 'false' parameter is passed here because the
1414      // wait was not timed out due to thread interrupt.
1415      JvmtiExport::post_monitor_waited(jt, this, false);
1416
1417      // In this short circuit of the monitor wait protocol, the
1418      // current thread never drops ownership of the monitor and
1419      // never gets added to the wait queue so the current thread
1420      // cannot be made the successor. This means that the
1421      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
1422      // consume an unpark() meant for the ParkEvent associated with
1423      // this ObjectMonitor.
1424    }
1425    if (event.should_commit()) {
1426      post_monitor_wait_event(&event, 0, millis, false);
1427    }
1428    TEVENT(Wait - Throw IEX);
1429    THROW(vmSymbols::java_lang_InterruptedException());
1430    return;
1431  }
1432
1433  TEVENT(Wait);
1434
1435  assert(Self->_Stalled == 0, "invariant");
1436  Self->_Stalled = intptr_t(this);
1437  jt->set_current_waiting_monitor(this);
1438
1439  // create a node to be put into the queue
1440  // Critically, after we reset() the event but prior to park(), we must check
1441  // for a pending interrupt.
1442  ObjectWaiter node(Self);
1443  node.TState = ObjectWaiter::TS_WAIT;
1444  Self->_ParkEvent->reset();
1445  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1446
1447  // Enter the waiting queue, which is a circular doubly linked list in this case
1448  // but it could be a priority queue or any data structure.
1449  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
1450  // by the the owner of the monitor *except* in the case where park()
1451  // returns because of a timeout of interrupt.  Contention is exceptionally rare
1452  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1453
1454  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
1455  AddWaiter(&node);
1456  Thread::SpinRelease(&_WaitSetLock);
1457
1458  if ((SyncFlags & 4) == 0) {
1459    _Responsible = NULL;
1460  }
1461  intptr_t save = _recursions; // record the old recursion count
1462  _waiters++;                  // increment the number of waiters
1463  _recursions = 0;             // set the recursion level to be 1
1464  exit(true, Self);                    // exit the monitor
1465  guarantee(_owner != Self, "invariant");
1466
1467  // The thread is on the WaitSet list - now park() it.
1468  // On MP systems it's conceivable that a brief spin before we park
1469  // could be profitable.
1470  //
1471  // TODO-FIXME: change the following logic to a loop of the form
1472  //   while (!timeout && !interrupted && _notified == 0) park()
1473
1474  int ret = OS_OK;
1475  int WasNotified = 0;
1476  { // State transition wrappers
1477    OSThread* osthread = Self->osthread();
1478    OSThreadWaitState osts(osthread, true);
1479    {
1480      ThreadBlockInVM tbivm(jt);
1481      // Thread is in thread_blocked state and oop access is unsafe.
1482      jt->set_suspend_equivalent();
1483
1484      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1485        // Intentionally empty
1486      } else if (node._notified == 0) {
1487        if (millis <= 0) {
1488          Self->_ParkEvent->park();
1489        } else {
1490          ret = Self->_ParkEvent->park(millis);
1491        }
1492      }
1493
1494      // were we externally suspended while we were waiting?
1495      if (ExitSuspendEquivalent (jt)) {
1496        // TODO-FIXME: add -- if succ == Self then succ = null.
1497        jt->java_suspend_self();
1498      }
1499
1500    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
1501
1502    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
1503    // from the WaitSet to the EntryList.
1504    // See if we need to remove Node from the WaitSet.
1505    // We use double-checked locking to avoid grabbing _WaitSetLock
1506    // if the thread is not on the wait queue.
1507    //
1508    // Note that we don't need a fence before the fetch of TState.
1509    // In the worst case we'll fetch a old-stale value of TS_WAIT previously
1510    // written by the is thread. (perhaps the fetch might even be satisfied
1511    // by a look-aside into the processor's own store buffer, although given
1512    // the length of the code path between the prior ST and this load that's
1513    // highly unlikely).  If the following LD fetches a stale TS_WAIT value
1514    // then we'll acquire the lock and then re-fetch a fresh TState value.
1515    // That is, we fail toward safety.
1516
1517    if (node.TState == ObjectWaiter::TS_WAIT) {
1518      Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
1519      if (node.TState == ObjectWaiter::TS_WAIT) {
1520        DequeueSpecificWaiter(&node);       // unlink from WaitSet
1521        assert(node._notified == 0, "invariant");
1522        node.TState = ObjectWaiter::TS_RUN;
1523      }
1524      Thread::SpinRelease(&_WaitSetLock);
1525    }
1526
1527    // The thread is now either on off-list (TS_RUN),
1528    // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1529    // The Node's TState variable is stable from the perspective of this thread.
1530    // No other threads will asynchronously modify TState.
1531    guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
1532    OrderAccess::loadload();
1533    if (_succ == Self) _succ = NULL;
1534    WasNotified = node._notified;
1535
1536    // Reentry phase -- reacquire the monitor.
1537    // re-enter contended monitor after object.wait().
1538    // retain OBJECT_WAIT state until re-enter successfully completes
1539    // Thread state is thread_in_vm and oop access is again safe,
1540    // although the raw address of the object may have changed.
1541    // (Don't cache naked oops over safepoints, of course).
1542
1543    // post monitor waited event. Note that this is past-tense, we are done waiting.
1544    if (JvmtiExport::should_post_monitor_waited()) {
1545      JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1546
1547      if (node._notified != 0 && _succ == Self) {
1548        // In this part of the monitor wait-notify-reenter protocol it
1549        // is possible (and normal) for another thread to do a fastpath
1550        // monitor enter-exit while this thread is still trying to get
1551        // to the reenter portion of the protocol.
1552        //
1553        // The ObjectMonitor was notified and the current thread is
1554        // the successor which also means that an unpark() has already
1555        // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1556        // consume the unpark() that was done when the successor was
1557        // set because the same ParkEvent is shared between Java
1558        // monitors and JVM/TI RawMonitors (for now).
1559        //
1560        // We redo the unpark() to ensure forward progress, i.e., we
1561        // don't want all pending threads hanging (parked) with none
1562        // entering the unlocked monitor.
1563        node._event->unpark();
1564      }
1565    }
1566
1567    if (event.should_commit()) {
1568      post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
1569    }
1570
1571    OrderAccess::fence();
1572
1573    assert(Self->_Stalled != 0, "invariant");
1574    Self->_Stalled = 0;
1575
1576    assert(_owner != Self, "invariant");
1577    ObjectWaiter::TStates v = node.TState;
1578    if (v == ObjectWaiter::TS_RUN) {
1579      enter(Self);
1580    } else {
1581      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1582      ReenterI(Self, &node);
1583      node.wait_reenter_end(this);
1584    }
1585
1586    // Self has reacquired the lock.
1587    // Lifecycle - the node representing Self must not appear on any queues.
1588    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1589    // want residual elements associated with this thread left on any lists.
1590    guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1591    assert(_owner == Self, "invariant");
1592    assert(_succ != Self, "invariant");
1593  } // OSThreadWaitState()
1594
1595  jt->set_current_waiting_monitor(NULL);
1596
1597  guarantee(_recursions == 0, "invariant");
1598  _recursions = save;     // restore the old recursion count
1599  _waiters--;             // decrement the number of waiters
1600
1601  // Verify a few postconditions
1602  assert(_owner == Self, "invariant");
1603  assert(_succ != Self, "invariant");
1604  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
1605
1606  if (SyncFlags & 32) {
1607    OrderAccess::fence();
1608  }
1609
1610  // check if the notification happened
1611  if (!WasNotified) {
1612    // no, it could be timeout or Thread.interrupt() or both
1613    // check for interrupt event, otherwise it is timeout
1614    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1615      TEVENT(Wait - throw IEX from epilog);
1616      THROW(vmSymbols::java_lang_InterruptedException());
1617    }
1618  }
1619
1620  // NOTE: Spurious wake up will be consider as timeout.
1621  // Monitor notify has precedence over thread interrupt.
1622}
1623
1624
1625// Consider:
1626// If the lock is cool (cxq == null && succ == null) and we're on an MP system
1627// then instead of transferring a thread from the WaitSet to the EntryList
1628// we might just dequeue a thread from the WaitSet and directly unpark() it.
1629
1630void ObjectMonitor::INotify(Thread * Self) {
1631  const int policy = Knob_MoveNotifyee;
1632
1633  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1634  ObjectWaiter * iterator = DequeueWaiter();
1635  if (iterator != NULL) {
1636    TEVENT(Notify1 - Transfer);
1637    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1638    guarantee(iterator->_notified == 0, "invariant");
1639    // Disposition - what might we do with iterator ?
1640    // a.  add it directly to the EntryList - either tail (policy == 1)
1641    //     or head (policy == 0).
1642    // b.  push it onto the front of the _cxq (policy == 2).
1643    // For now we use (b).
1644    if (policy != 4) {
1645      iterator->TState = ObjectWaiter::TS_ENTER;
1646    }
1647    iterator->_notified = 1;
1648    iterator->_notifier_tid = THREAD_TRACE_ID(Self);
1649
1650    ObjectWaiter * list = _EntryList;
1651    if (list != NULL) {
1652      assert(list->_prev == NULL, "invariant");
1653      assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1654      assert(list != iterator, "invariant");
1655    }
1656
1657    if (policy == 0) {       // prepend to EntryList
1658      if (list == NULL) {
1659        iterator->_next = iterator->_prev = NULL;
1660        _EntryList = iterator;
1661      } else {
1662        list->_prev = iterator;
1663        iterator->_next = list;
1664        iterator->_prev = NULL;
1665        _EntryList = iterator;
1666      }
1667    } else if (policy == 1) {      // append to EntryList
1668      if (list == NULL) {
1669        iterator->_next = iterator->_prev = NULL;
1670        _EntryList = iterator;
1671      } else {
1672        // CONSIDER:  finding the tail currently requires a linear-time walk of
1673        // the EntryList.  We can make tail access constant-time by converting to
1674        // a CDLL instead of using our current DLL.
1675        ObjectWaiter * tail;
1676        for (tail = list; tail->_next != NULL; tail = tail->_next) /* empty */;
1677        assert(tail != NULL && tail->_next == NULL, "invariant");
1678        tail->_next = iterator;
1679        iterator->_prev = tail;
1680        iterator->_next = NULL;
1681      }
1682    } else if (policy == 2) {      // prepend to cxq
1683      if (list == NULL) {
1684        iterator->_next = iterator->_prev = NULL;
1685        _EntryList = iterator;
1686      } else {
1687        iterator->TState = ObjectWaiter::TS_CXQ;
1688        for (;;) {
1689          ObjectWaiter * front = _cxq;
1690          iterator->_next = front;
1691          if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
1692            break;
1693          }
1694        }
1695      }
1696    } else if (policy == 3) {      // append to cxq
1697      iterator->TState = ObjectWaiter::TS_CXQ;
1698      for (;;) {
1699        ObjectWaiter * tail = _cxq;
1700        if (tail == NULL) {
1701          iterator->_next = NULL;
1702          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
1703            break;
1704          }
1705        } else {
1706          while (tail->_next != NULL) tail = tail->_next;
1707          tail->_next = iterator;
1708          iterator->_prev = tail;
1709          iterator->_next = NULL;
1710          break;
1711        }
1712      }
1713    } else {
1714      ParkEvent * ev = iterator->_event;
1715      iterator->TState = ObjectWaiter::TS_RUN;
1716      OrderAccess::fence();
1717      ev->unpark();
1718    }
1719
1720    // _WaitSetLock protects the wait queue, not the EntryList.  We could
1721    // move the add-to-EntryList operation, above, outside the critical section
1722    // protected by _WaitSetLock.  In practice that's not useful.  With the
1723    // exception of  wait() timeouts and interrupts the monitor owner
1724    // is the only thread that grabs _WaitSetLock.  There's almost no contention
1725    // on _WaitSetLock so it's not profitable to reduce the length of the
1726    // critical section.
1727
1728    if (policy < 4) {
1729      iterator->wait_reenter_begin(this);
1730    }
1731  }
1732  Thread::SpinRelease(&_WaitSetLock);
1733}
1734
1735// Consider: a not-uncommon synchronization bug is to use notify() when
1736// notifyAll() is more appropriate, potentially resulting in stranded
1737// threads; this is one example of a lost wakeup. A useful diagnostic
1738// option is to force all notify() operations to behave as notifyAll().
1739//
1740// Note: We can also detect many such problems with a "minimum wait".
1741// When the "minimum wait" is set to a small non-zero timeout value
1742// and the program does not hang whereas it did absent "minimum wait",
1743// that suggests a lost wakeup bug. The '-XX:SyncFlags=1' option uses
1744// a "minimum wait" for all park() operations; see the recheckInterval
1745// variable and MAX_RECHECK_INTERVAL.
1746
1747void ObjectMonitor::notify(TRAPS) {
1748  CHECK_OWNER();
1749  if (_WaitSet == NULL) {
1750    TEVENT(Empty-Notify);
1751    return;
1752  }
1753  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1754  INotify(THREAD);
1755  OM_PERFDATA_OP(Notifications, inc(1));
1756}
1757
1758
1759// The current implementation of notifyAll() transfers the waiters one-at-a-time
1760// from the waitset to the EntryList. This could be done more efficiently with a
1761// single bulk transfer but in practice it's not time-critical. Beware too,
1762// that in prepend-mode we invert the order of the waiters. Let's say that the
1763// waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1764// mode the waitset will be empty and the EntryList will be "DCBAXYZ".
1765
1766void ObjectMonitor::notifyAll(TRAPS) {
1767  CHECK_OWNER();
1768  if (_WaitSet == NULL) {
1769    TEVENT(Empty-NotifyAll);
1770    return;
1771  }
1772
1773  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1774  int tally = 0;
1775  while (_WaitSet != NULL) {
1776    tally++;
1777    INotify(THREAD);
1778  }
1779
1780  OM_PERFDATA_OP(Notifications, inc(tally));
1781}
1782
1783// -----------------------------------------------------------------------------
1784// Adaptive Spinning Support
1785//
1786// Adaptive spin-then-block - rational spinning
1787//
1788// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
1789// algorithm.  On high order SMP systems it would be better to start with
1790// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
1791// a contending thread could enqueue itself on the cxq and then spin locally
1792// on a thread-specific variable such as its ParkEvent._Event flag.
1793// That's left as an exercise for the reader.  Note that global spinning is
1794// not problematic on Niagara, as the L2 cache serves the interconnect and
1795// has both low latency and massive bandwidth.
1796//
1797// Broadly, we can fix the spin frequency -- that is, the % of contended lock
1798// acquisition attempts where we opt to spin --  at 100% and vary the spin count
1799// (duration) or we can fix the count at approximately the duration of
1800// a context switch and vary the frequency.   Of course we could also
1801// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
1802// For a description of 'Adaptive spin-then-block mutual exclusion in
1803// multi-threaded processing,' see U.S. Pat. No. 8046758.
1804//
1805// This implementation varies the duration "D", where D varies with
1806// the success rate of recent spin attempts. (D is capped at approximately
1807// length of a round-trip context switch).  The success rate for recent
1808// spin attempts is a good predictor of the success rate of future spin
1809// attempts.  The mechanism adapts automatically to varying critical
1810// section length (lock modality), system load and degree of parallelism.
1811// D is maintained per-monitor in _SpinDuration and is initialized
1812// optimistically.  Spin frequency is fixed at 100%.
1813//
1814// Note that _SpinDuration is volatile, but we update it without locks
1815// or atomics.  The code is designed so that _SpinDuration stays within
1816// a reasonable range even in the presence of races.  The arithmetic
1817// operations on _SpinDuration are closed over the domain of legal values,
1818// so at worst a race will install and older but still legal value.
1819// At the very worst this introduces some apparent non-determinism.
1820// We might spin when we shouldn't or vice-versa, but since the spin
1821// count are relatively short, even in the worst case, the effect is harmless.
1822//
1823// Care must be taken that a low "D" value does not become an
1824// an absorbing state.  Transient spinning failures -- when spinning
1825// is overall profitable -- should not cause the system to converge
1826// on low "D" values.  We want spinning to be stable and predictable
1827// and fairly responsive to change and at the same time we don't want
1828// it to oscillate, become metastable, be "too" non-deterministic,
1829// or converge on or enter undesirable stable absorbing states.
1830//
1831// We implement a feedback-based control system -- using past behavior
1832// to predict future behavior.  We face two issues: (a) if the
1833// input signal is random then the spin predictor won't provide optimal
1834// results, and (b) if the signal frequency is too high then the control
1835// system, which has some natural response lag, will "chase" the signal.
1836// (b) can arise from multimodal lock hold times.  Transient preemption
1837// can also result in apparent bimodal lock hold times.
1838// Although sub-optimal, neither condition is particularly harmful, as
1839// in the worst-case we'll spin when we shouldn't or vice-versa.
1840// The maximum spin duration is rather short so the failure modes aren't bad.
1841// To be conservative, I've tuned the gain in system to bias toward
1842// _not spinning.  Relatedly, the system can sometimes enter a mode where it
1843// "rings" or oscillates between spinning and not spinning.  This happens
1844// when spinning is just on the cusp of profitability, however, so the
1845// situation is not dire.  The state is benign -- there's no need to add
1846// hysteresis control to damp the transition rate between spinning and
1847// not spinning.
1848
1849// Spinning: Fixed frequency (100%), vary duration
1850int ObjectMonitor::TrySpin(Thread * Self) {
1851  // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
1852  int ctr = Knob_FixedSpin;
1853  if (ctr != 0) {
1854    while (--ctr >= 0) {
1855      if (TryLock(Self) > 0) return 1;
1856      SpinPause();
1857    }
1858    return 0;
1859  }
1860
1861  for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
1862    if (TryLock(Self) > 0) {
1863      // Increase _SpinDuration ...
1864      // Note that we don't clamp SpinDuration precisely at SpinLimit.
1865      // Raising _SpurDuration to the poverty line is key.
1866      int x = _SpinDuration;
1867      if (x < Knob_SpinLimit) {
1868        if (x < Knob_Poverty) x = Knob_Poverty;
1869        _SpinDuration = x + Knob_BonusB;
1870      }
1871      return 1;
1872    }
1873    SpinPause();
1874  }
1875
1876  // Admission control - verify preconditions for spinning
1877  //
1878  // We always spin a little bit, just to prevent _SpinDuration == 0 from
1879  // becoming an absorbing state.  Put another way, we spin briefly to
1880  // sample, just in case the system load, parallelism, contention, or lock
1881  // modality changed.
1882  //
1883  // Consider the following alternative:
1884  // Periodically set _SpinDuration = _SpinLimit and try a long/full
1885  // spin attempt.  "Periodically" might mean after a tally of
1886  // the # of failed spin attempts (or iterations) reaches some threshold.
1887  // This takes us into the realm of 1-out-of-N spinning, where we
1888  // hold the duration constant but vary the frequency.
1889
1890  ctr = _SpinDuration;
1891  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
1892  if (ctr <= 0) return 0;
1893
1894  if (Knob_SuccRestrict && _succ != NULL) return 0;
1895  if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1896    TEVENT(Spin abort - notrunnable [TOP]);
1897    return 0;
1898  }
1899
1900  int MaxSpin = Knob_MaxSpinners;
1901  if (MaxSpin >= 0) {
1902    if (_Spinner > MaxSpin) {
1903      TEVENT(Spin abort -- too many spinners);
1904      return 0;
1905    }
1906    // Slightly racy, but benign ...
1907    Adjust(&_Spinner, 1);
1908  }
1909
1910  // We're good to spin ... spin ingress.
1911  // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1912  // when preparing to LD...CAS _owner, etc and the CAS is likely
1913  // to succeed.
1914  int hits    = 0;
1915  int msk     = 0;
1916  int caspty  = Knob_CASPenalty;
1917  int oxpty   = Knob_OXPenalty;
1918  int sss     = Knob_SpinSetSucc;
1919  if (sss && _succ == NULL) _succ = Self;
1920  Thread * prv = NULL;
1921
1922  // There are three ways to exit the following loop:
1923  // 1.  A successful spin where this thread has acquired the lock.
1924  // 2.  Spin failure with prejudice
1925  // 3.  Spin failure without prejudice
1926
1927  while (--ctr >= 0) {
1928
1929    // Periodic polling -- Check for pending GC
1930    // Threads may spin while they're unsafe.
1931    // We don't want spinning threads to delay the JVM from reaching
1932    // a stop-the-world safepoint or to steal cycles from GC.
1933    // If we detect a pending safepoint we abort in order that
1934    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1935    // this thread, if safe, doesn't steal cycles from GC.
1936    // This is in keeping with the "no loitering in runtime" rule.
1937    // We periodically check to see if there's a safepoint pending.
1938    if ((ctr & 0xFF) == 0) {
1939      if (SafepointSynchronize::do_call_back()) {
1940        TEVENT(Spin: safepoint);
1941        goto Abort;           // abrupt spin egress
1942      }
1943      if (Knob_UsePause & 1) SpinPause();
1944    }
1945
1946    if (Knob_UsePause & 2) SpinPause();
1947
1948    // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
1949    // This is useful on classic SMP systems, but is of less utility on
1950    // N1-style CMT platforms.
1951    //
1952    // Trade-off: lock acquisition latency vs coherency bandwidth.
1953    // Lock hold times are typically short.  A histogram
1954    // of successful spin attempts shows that we usually acquire
1955    // the lock early in the spin.  That suggests we want to
1956    // sample _owner frequently in the early phase of the spin,
1957    // but then back-off and sample less frequently as the spin
1958    // progresses.  The back-off makes a good citizen on SMP big
1959    // SMP systems.  Oversampling _owner can consume excessive
1960    // coherency bandwidth.  Relatedly, if we _oversample _owner we
1961    // can inadvertently interfere with the the ST m->owner=null.
1962    // executed by the lock owner.
1963    if (ctr & msk) continue;
1964    ++hits;
1965    if ((hits & 0xF) == 0) {
1966      // The 0xF, above, corresponds to the exponent.
1967      // Consider: (msk+1)|msk
1968      msk = ((msk << 2)|3) & BackOffMask;
1969    }
1970
1971    // Probe _owner with TATAS
1972    // If this thread observes the monitor transition or flicker
1973    // from locked to unlocked to locked, then the odds that this
1974    // thread will acquire the lock in this spin attempt go down
1975    // considerably.  The same argument applies if the CAS fails
1976    // or if we observe _owner change from one non-null value to
1977    // another non-null value.   In such cases we might abort
1978    // the spin without prejudice or apply a "penalty" to the
1979    // spin count-down variable "ctr", reducing it by 100, say.
1980
1981    Thread * ox = (Thread *) _owner;
1982    if (ox == NULL) {
1983      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
1984      if (ox == NULL) {
1985        // The CAS succeeded -- this thread acquired ownership
1986        // Take care of some bookkeeping to exit spin state.
1987        if (sss && _succ == Self) {
1988          _succ = NULL;
1989        }
1990        if (MaxSpin > 0) Adjust(&_Spinner, -1);
1991
1992        // Increase _SpinDuration :
1993        // The spin was successful (profitable) so we tend toward
1994        // longer spin attempts in the future.
1995        // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1996        // If we acquired the lock early in the spin cycle it
1997        // makes sense to increase _SpinDuration proportionally.
1998        // Note that we don't clamp SpinDuration precisely at SpinLimit.
1999        int x = _SpinDuration;
2000        if (x < Knob_SpinLimit) {
2001          if (x < Knob_Poverty) x = Knob_Poverty;
2002          _SpinDuration = x + Knob_Bonus;
2003        }
2004        return 1;
2005      }
2006
2007      // The CAS failed ... we can take any of the following actions:
2008      // * penalize: ctr -= Knob_CASPenalty
2009      // * exit spin with prejudice -- goto Abort;
2010      // * exit spin without prejudice.
2011      // * Since CAS is high-latency, retry again immediately.
2012      prv = ox;
2013      TEVENT(Spin: cas failed);
2014      if (caspty == -2) break;
2015      if (caspty == -1) goto Abort;
2016      ctr -= caspty;
2017      continue;
2018    }
2019
2020    // Did lock ownership change hands ?
2021    if (ox != prv && prv != NULL) {
2022      TEVENT(spin: Owner changed)
2023      if (oxpty == -2) break;
2024      if (oxpty == -1) goto Abort;
2025      ctr -= oxpty;
2026    }
2027    prv = ox;
2028
2029    // Abort the spin if the owner is not executing.
2030    // The owner must be executing in order to drop the lock.
2031    // Spinning while the owner is OFFPROC is idiocy.
2032    // Consider: ctr -= RunnablePenalty ;
2033    if (Knob_OState && NotRunnable (Self, ox)) {
2034      TEVENT(Spin abort - notrunnable);
2035      goto Abort;
2036    }
2037    if (sss && _succ == NULL) _succ = Self;
2038  }
2039
2040  // Spin failed with prejudice -- reduce _SpinDuration.
2041  // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2042  // AIMD is globally stable.
2043  TEVENT(Spin failure);
2044  {
2045    int x = _SpinDuration;
2046    if (x > 0) {
2047      // Consider an AIMD scheme like: x -= (x >> 3) + 100
2048      // This is globally sample and tends to damp the response.
2049      x -= Knob_Penalty;
2050      if (x < 0) x = 0;
2051      _SpinDuration = x;
2052    }
2053  }
2054
2055 Abort:
2056  if (MaxSpin >= 0) Adjust(&_Spinner, -1);
2057  if (sss && _succ == Self) {
2058    _succ = NULL;
2059    // Invariant: after setting succ=null a contending thread
2060    // must recheck-retry _owner before parking.  This usually happens
2061    // in the normal usage of TrySpin(), but it's safest
2062    // to make TrySpin() as foolproof as possible.
2063    OrderAccess::fence();
2064    if (TryLock(Self) > 0) return 1;
2065  }
2066  return 0;
2067}
2068
2069// NotRunnable() -- informed spinning
2070//
2071// Don't bother spinning if the owner is not eligible to drop the lock.
2072// Peek at the owner's schedctl.sc_state and Thread._thread_values and
2073// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2074// The thread must be runnable in order to drop the lock in timely fashion.
2075// If the _owner is not runnable then spinning will not likely be
2076// successful (profitable).
2077//
2078// Beware -- the thread referenced by _owner could have died
2079// so a simply fetch from _owner->_thread_state might trap.
2080// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2081// Because of the lifecycle issues the schedctl and _thread_state values
2082// observed by NotRunnable() might be garbage.  NotRunnable must
2083// tolerate this and consider the observed _thread_state value
2084// as advisory.
2085//
2086// Beware too, that _owner is sometimes a BasicLock address and sometimes
2087// a thread pointer.
2088// Alternately, we might tag the type (thread pointer vs basiclock pointer)
2089// with the LSB of _owner.  Another option would be to probablistically probe
2090// the putative _owner->TypeTag value.
2091//
2092// Checking _thread_state isn't perfect.  Even if the thread is
2093// in_java it might be blocked on a page-fault or have been preempted
2094// and sitting on a ready/dispatch queue.  _thread state in conjunction
2095// with schedctl.sc_state gives us a good picture of what the
2096// thread is doing, however.
2097//
2098// TODO: check schedctl.sc_state.
2099// We'll need to use SafeFetch32() to read from the schedctl block.
2100// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2101//
2102// The return value from NotRunnable() is *advisory* -- the
2103// result is based on sampling and is not necessarily coherent.
2104// The caller must tolerate false-negative and false-positive errors.
2105// Spinning, in general, is probabilistic anyway.
2106
2107
2108int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
2109  // Check ox->TypeTag == 2BAD.
2110  if (ox == NULL) return 0;
2111
2112  // Avoid transitive spinning ...
2113  // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
2114  // Immediately after T1 acquires L it's possible that T2, also
2115  // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2116  // This occurs transiently after T1 acquired L but before
2117  // T1 managed to clear T1.Stalled.  T2 does not need to abort
2118  // its spin in this circumstance.
2119  intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
2120
2121  if (BlockedOn == 1) return 1;
2122  if (BlockedOn != 0) {
2123    return BlockedOn != intptr_t(this) && _owner == ox;
2124  }
2125
2126  assert(sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant");
2127  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);;
2128  // consider also: jst != _thread_in_Java -- but that's overspecific.
2129  return jst == _thread_blocked || jst == _thread_in_native;
2130}
2131
2132
2133// -----------------------------------------------------------------------------
2134// WaitSet management ...
2135
2136ObjectWaiter::ObjectWaiter(Thread* thread) {
2137  _next     = NULL;
2138  _prev     = NULL;
2139  _notified = 0;
2140  TState    = TS_RUN;
2141  _thread   = thread;
2142  _event    = thread->_ParkEvent;
2143  _active   = false;
2144  assert(_event != NULL, "invariant");
2145}
2146
2147void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
2148  JavaThread *jt = (JavaThread *)this->_thread;
2149  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
2150}
2151
2152void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
2153  JavaThread *jt = (JavaThread *)this->_thread;
2154  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
2155}
2156
2157inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
2158  assert(node != NULL, "should not add NULL node");
2159  assert(node->_prev == NULL, "node already in list");
2160  assert(node->_next == NULL, "node already in list");
2161  // put node at end of queue (circular doubly linked list)
2162  if (_WaitSet == NULL) {
2163    _WaitSet = node;
2164    node->_prev = node;
2165    node->_next = node;
2166  } else {
2167    ObjectWaiter* head = _WaitSet;
2168    ObjectWaiter* tail = head->_prev;
2169    assert(tail->_next == head, "invariant check");
2170    tail->_next = node;
2171    head->_prev = node;
2172    node->_next = head;
2173    node->_prev = tail;
2174  }
2175}
2176
2177inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
2178  // dequeue the very first waiter
2179  ObjectWaiter* waiter = _WaitSet;
2180  if (waiter) {
2181    DequeueSpecificWaiter(waiter);
2182  }
2183  return waiter;
2184}
2185
2186inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
2187  assert(node != NULL, "should not dequeue NULL node");
2188  assert(node->_prev != NULL, "node already removed from list");
2189  assert(node->_next != NULL, "node already removed from list");
2190  // when the waiter has woken up because of interrupt,
2191  // timeout or other spurious wake-up, dequeue the
2192  // waiter from waiting list
2193  ObjectWaiter* next = node->_next;
2194  if (next == node) {
2195    assert(node->_prev == node, "invariant check");
2196    _WaitSet = NULL;
2197  } else {
2198    ObjectWaiter* prev = node->_prev;
2199    assert(prev->_next == node, "invariant check");
2200    assert(next->_prev == node, "invariant check");
2201    next->_prev = prev;
2202    prev->_next = next;
2203    if (_WaitSet == node) {
2204      _WaitSet = next;
2205    }
2206  }
2207  node->_next = NULL;
2208  node->_prev = NULL;
2209}
2210
2211// -----------------------------------------------------------------------------
2212// PerfData support
2213PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL;
2214PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL;
2215PerfCounter * ObjectMonitor::_sync_Parks                       = NULL;
2216PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL;
2217PerfCounter * ObjectMonitor::_sync_Notifications               = NULL;
2218PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL;
2219PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL;
2220PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL;
2221PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL;
2222PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL;
2223PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL;
2224PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL;
2225PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL;
2226PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL;
2227PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL;
2228PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL;
2229PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL;
2230PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL;
2231
2232// One-shot global initialization for the sync subsystem.
2233// We could also defer initialization and initialize on-demand
2234// the first time we call inflate().  Initialization would
2235// be protected - like so many things - by the MonitorCache_lock.
2236
2237void ObjectMonitor::Initialize() {
2238  static int InitializationCompleted = 0;
2239  assert(InitializationCompleted == 0, "invariant");
2240  InitializationCompleted = 1;
2241  if (UsePerfData) {
2242    EXCEPTION_MARK;
2243#define NEWPERFCOUNTER(n)                                                \
2244  {                                                                      \
2245    n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
2246                                        CHECK);                          \
2247  }
2248#define NEWPERFVARIABLE(n)                                                \
2249  {                                                                       \
2250    n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
2251                                         CHECK);                          \
2252  }
2253    NEWPERFCOUNTER(_sync_Inflations);
2254    NEWPERFCOUNTER(_sync_Deflations);
2255    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2256    NEWPERFCOUNTER(_sync_FutileWakeups);
2257    NEWPERFCOUNTER(_sync_Parks);
2258    NEWPERFCOUNTER(_sync_EmptyNotifications);
2259    NEWPERFCOUNTER(_sync_Notifications);
2260    NEWPERFCOUNTER(_sync_SlowEnter);
2261    NEWPERFCOUNTER(_sync_SlowExit);
2262    NEWPERFCOUNTER(_sync_SlowNotify);
2263    NEWPERFCOUNTER(_sync_SlowNotifyAll);
2264    NEWPERFCOUNTER(_sync_FailedSpins);
2265    NEWPERFCOUNTER(_sync_SuccessfulSpins);
2266    NEWPERFCOUNTER(_sync_PrivateA);
2267    NEWPERFCOUNTER(_sync_PrivateB);
2268    NEWPERFCOUNTER(_sync_MonInCirculation);
2269    NEWPERFCOUNTER(_sync_MonScavenged);
2270    NEWPERFVARIABLE(_sync_MonExtant);
2271#undef NEWPERFCOUNTER
2272#undef NEWPERFVARIABLE
2273  }
2274}
2275
2276static char * kvGet(char * kvList, const char * Key) {
2277  if (kvList == NULL) return NULL;
2278  size_t n = strlen(Key);
2279  char * Search;
2280  for (Search = kvList; *Search; Search += strlen(Search) + 1) {
2281    if (strncmp (Search, Key, n) == 0) {
2282      if (Search[n] == '=') return Search + n + 1;
2283      if (Search[n] == 0)   return(char *) "1";
2284    }
2285  }
2286  return NULL;
2287}
2288
2289static int kvGetInt(char * kvList, const char * Key, int Default) {
2290  char * v = kvGet(kvList, Key);
2291  int rslt = v ? ::strtol(v, NULL, 0) : Default;
2292  if (Knob_ReportSettings && v != NULL) {
2293    tty->print_cr("INFO: SyncKnob: %s %d(%d)", Key, rslt, Default) ;
2294    tty->flush();
2295  }
2296  return rslt;
2297}
2298
2299void ObjectMonitor::DeferredInitialize() {
2300  if (InitDone > 0) return;
2301  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2302    while (InitDone != 1) /* empty */;
2303    return;
2304  }
2305
2306  // One-shot global initialization ...
2307  // The initialization is idempotent, so we don't need locks.
2308  // In the future consider doing this via os::init_2().
2309  // SyncKnobs consist of <Key>=<Value> pairs in the style
2310  // of environment variables.  Start by converting ':' to NUL.
2311
2312  if (SyncKnobs == NULL) SyncKnobs = "";
2313
2314  size_t sz = strlen(SyncKnobs);
2315  char * knobs = (char *) malloc(sz + 2);
2316  if (knobs == NULL) {
2317    vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
2318    guarantee(0, "invariant");
2319  }
2320  strcpy(knobs, SyncKnobs);
2321  knobs[sz+1] = 0;
2322  for (char * p = knobs; *p; p++) {
2323    if (*p == ':') *p = 0;
2324  }
2325
2326  #define SETKNOB(x) { Knob_##x = kvGetInt(knobs, #x, Knob_##x); }
2327  SETKNOB(ReportSettings);
2328  SETKNOB(ExitRelease);
2329  SETKNOB(Verbose);
2330  SETKNOB(VerifyInUse);
2331  SETKNOB(VerifyMatch);
2332  SETKNOB(FixedSpin);
2333  SETKNOB(SpinLimit);
2334  SETKNOB(SpinBase);
2335  SETKNOB(SpinBackOff);
2336  SETKNOB(CASPenalty);
2337  SETKNOB(OXPenalty);
2338  SETKNOB(LogSpins);
2339  SETKNOB(SpinSetSucc);
2340  SETKNOB(SuccEnabled);
2341  SETKNOB(SuccRestrict);
2342  SETKNOB(Penalty);
2343  SETKNOB(Bonus);
2344  SETKNOB(BonusB);
2345  SETKNOB(Poverty);
2346  SETKNOB(SpinAfterFutile);
2347  SETKNOB(UsePause);
2348  SETKNOB(SpinEarly);
2349  SETKNOB(OState);
2350  SETKNOB(MaxSpinners);
2351  SETKNOB(PreSpin);
2352  SETKNOB(ExitPolicy);
2353  SETKNOB(QMode);
2354  SETKNOB(ResetEvent);
2355  SETKNOB(MoveNotifyee);
2356  SETKNOB(FastHSSEC);
2357  #undef SETKNOB
2358
2359  if (Knob_Verbose) {
2360    sanity_checks();
2361  }
2362
2363  if (os::is_MP()) {
2364    BackOffMask = (1 << Knob_SpinBackOff) - 1;
2365    if (Knob_ReportSettings) {
2366      tty->print_cr("INFO: BackOffMask=0x%X", BackOffMask);
2367    }
2368    // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2369  } else {
2370    Knob_SpinLimit = 0;
2371    Knob_SpinBase  = 0;
2372    Knob_PreSpin   = 0;
2373    Knob_FixedSpin = -1;
2374  }
2375
2376  if (Knob_LogSpins == 0) {
2377    ObjectMonitor::_sync_FailedSpins = NULL;
2378  }
2379
2380  free(knobs);
2381  OrderAccess::fence();
2382  InitDone = 1;
2383}
2384
2385void ObjectMonitor::sanity_checks() {
2386  int error_cnt = 0;
2387  int warning_cnt = 0;
2388  bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
2389
2390  if (verbose) {
2391    tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
2392                  sizeof(ObjectMonitor));
2393    tty->print_cr("INFO: sizeof(PaddedEnd<ObjectMonitor>)=" SIZE_FORMAT,
2394                  sizeof(PaddedEnd<ObjectMonitor>));
2395  }
2396
2397  uint cache_line_size = VM_Version::L1_data_cache_line_size();
2398  if (verbose) {
2399    tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
2400  }
2401
2402  ObjectMonitor dummy;
2403  u_char *addr_begin  = (u_char*)&dummy;
2404  u_char *addr_header = (u_char*)&dummy._header;
2405  u_char *addr_owner  = (u_char*)&dummy._owner;
2406
2407  uint offset_header = (uint)(addr_header - addr_begin);
2408  if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header);
2409
2410  uint offset_owner = (uint)(addr_owner - addr_begin);
2411  if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner);
2412
2413  if ((uint)(addr_header - addr_begin) != 0) {
2414    tty->print_cr("ERROR: offset(_header) must be zero (0).");
2415    error_cnt++;
2416  }
2417
2418  if (cache_line_size != 0) {
2419    // We were able to determine the L1 data cache line size so
2420    // do some cache line specific sanity checks
2421
2422    if ((offset_owner - offset_header) < cache_line_size) {
2423      tty->print_cr("WARNING: the _header and _owner fields are closer "
2424                    "than a cache line which permits false sharing.");
2425      warning_cnt++;
2426    }
2427
2428    if ((sizeof(PaddedEnd<ObjectMonitor>) % cache_line_size) != 0) {
2429      tty->print_cr("WARNING: PaddedEnd<ObjectMonitor> size is not a "
2430                    "multiple of a cache line which permits false sharing.");
2431      warning_cnt++;
2432    }
2433  }
2434
2435  ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
2436                                    &warning_cnt);
2437
2438  if (verbose || error_cnt != 0 || warning_cnt != 0) {
2439    tty->print_cr("INFO: error_cnt=%d", error_cnt);
2440    tty->print_cr("INFO: warning_cnt=%d", warning_cnt);
2441  }
2442
2443  guarantee(error_cnt == 0,
2444            "Fatal error(s) found in ObjectMonitor::sanity_checks()");
2445}
2446
2447#ifndef PRODUCT
2448void ObjectMonitor::verify() {
2449}
2450
2451void ObjectMonitor::print() {
2452}
2453
2454void ObjectMonitor_test() {
2455  ObjectMonitor::sanity_checks();
2456}
2457#endif
2458