
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
# include "thread_bsd.inline.hpp"
#endif

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
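//
//   As a rough sketch of that colocation (hedged: the authoritative layout is
//   the SplitWord union in mutex.hpp; this restates it in simplified form):
//
//     union SplitWord {                               // sketch only
//       volatile intptr_t FullWord ;                  // (cxq head | LockByte)
//       volatile jbyte    Bytes [sizeof(intptr_t)] ;  // Bytes[_LSBINDEX] is the LockByte
//     } ;
//
//   The lock is held iff the LockByte is non-zero; the remaining bits of the
//   FullWord hold the head of the cxq.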
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff" succession.)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
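//
//   A hedged sketch of that remedy (AcquireCounter and FairnessInterval are
//   hypothetical -- no such fields exist in the current Monitor):
//
//     void Monitor::unlock_fairly() {           // hypothetical -- sketch only
//       if (--_AcquireCounter > 0) { IUnlock (false) ; return ; }
//       _AcquireCounter = FairnessInterval ;    // reset the abdication budget
//       // else abdicate: pass the lock directly (direct handoff) to the head
//       // of the EntryList and requeue this thread at the EntryList tail.
//     }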
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   --  For a given monitor there can be at most one OnDeck thread at any given
//       instant.  The OnDeck thread is contending for the lock, but has been
//       unlinked from the EntryList and cxq by some previous unlock() operations.
//       Once a thread has been designated the OnDeck thread it will remain so
//       until it manages to acquire the lock -- being OnDeck is a stable property.
//   --  Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//       having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//       OnDeck by CASing the field from null to non-null.  If successful, that thread
//       is then responsible for progress and succession and can use CAS to detach and
//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//       we allow multiple concurrent "push" operations but restrict detach concurrency
//       to at most one thread.  Having selected and detached a successor, the thread then
//       changes the OnDeck to refer to that successor, and then unparks the successor.
//       That successor will eventually acquire the lock and clear OnDeck.  Beware
//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//       and then the successor eventually "drops" OnDeck.  Note that there's never
//       any sense of contention on the inner lock, however.  Threads never contend
//       or wait for the inner lock.
//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
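//   --  A condensed sketch of the inner-lock handshake described above; the
//       working code is in IUnlock(), below:
//
//         if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) return ;  // trylock failed --
//                                                                   // succession is another thread's problem
//         ParkEvent * w = /* detach cxq with CAS, drain into _EntryList, unlink the head */ ;
//         _OnDeck = w ;       // pass the inner lock to the heir presumptive ...
//         w->unpark() ;       // ... who clears OnDeck once it wins the outer lock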
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
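//
//   In sketch form (ParkEvent::Allocate() and ::Release() exist today; the
//   per-thread _EventFreeList stack and FreeNext link are hypothetical):
//
//     ParkEvent * ev = Self->_EventFreeList ;           // pop from the local cache
//     if (ev == NULL) ev = ParkEvent::Allocate (Self) ;
//     else Self->_EventFreeList = ev->FreeNext ;
//     ... enqueue on the cxq or WaitSet and park() on ev ...
//     ev->FreeNext = Self->_EventFreeList ;             // contention over -- push it back
//     Self->_EventFreeList = ev ;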
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
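
// For clarity: CASPTR(a,c,s) compares *a against c and, iff they match, stores s
// into *a, returning the value previously fetched from *a in either case.
// For example, CASPTR (&_LockWord, 0, _LBIT) -- as in TryFast() below --
// attempts an uncontended acquire and returns 0 on success.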

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work over
// the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock())    return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state; if OFFPROC, abort the spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
     timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  _LockWord.Bytes[_LSBINDEX] = 0 ;       // drop outer lock
  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
                            // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance, let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the inner lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.
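  //
  // A sketch of that last alternative (ListElement is hypothetical):
  //
  //   class ListElement {             // immortal and type-stable, like ParkEvent
  //     ListElement * ListNext ;
  //     ParkEvent   * Event ;         // both elements refer to one shared pure event
  //   } ;
  //
  // Each thread would carry two ListElements -- one for the WaitSet and one for
  // the EntryList|cxq -- relaxing the can't-be-on-two-lists-at-once constraint.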

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {      // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                  // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked by mutators
// but where the mutators are known to be blocked at the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}


// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

  #ifdef ASSERT
    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
    assert(least != this, "Specification of get_least_... call above");
    if (least != NULL && least->rank() <= special) {
      tty->print("Attempting to wait on monitor %s/%d while holding"
                 " lock %s/%d -- possible deadlock",
                 name(), rank(), least->name(), least->rank());
      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
    }
  #endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;          // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner             = NULL ;
  m->_snuck             = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList         = NULL ;
  m->_OnDeck            = NULL ;
  m->_WaitSet           = NULL ;
  m->_WaitLock[0]       = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
 _allow_vm_block   = allow_vm_block;
 _rank             = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}




// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
                    // Mutex::set_owner_implementation is a friend of Thread

      assert(this->rank() >= 0, "bad lock rank");

      if (LogMultipleMutexLocking && locks != NULL) {
        Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
      }

      // Deadlock avoidance rules require us to acquire Mutexes only in
      // a global total order. For example, if m1 is the lowest ranked mutex
      // that the thread holds and m2 is the mutex the thread is trying
      // to acquire, then deadlock avoidance rules require that the rank
      // of m2 be less than the rank of m1.
      // The rank Mutex::native is an exception in that it is not subject
      // to the verification rules.
      // Here are some further notes relating to mutex acquisition anomalies:
      // . under Solaris, the interrupt lock gets acquired when doing
      //   profiling, so any lock could be held.
      // . it is also ok to acquire Safepoint_lock at the very end while we
      //   already hold Terminator_lock - may happen because of periodic safepoints
      if (this->rank() != Mutex::native &&
          this->rank() != Mutex::suspend_resume &&
          locks != NULL && locks->rank() <= this->rank() &&
          !SafepointSynchronize::is_at_safepoint() &&
          this != Interrupt_lock && this != ProfileVM_lock &&
          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
            SafepointSynchronize::is_synchronizing())) {
        new_owner->print_owned_locks();
        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                      "possible deadlock", this->name(), this->rank(),
                      locks->name(), locks->rank()));
      }

      this->_next = new_owner->_owned_locks;
      new_owner->_owned_locks = this;
    #endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

    #ifdef ASSERT
      Monitor *locks = old_owner->owned_locks();

      if (LogMultipleMutexLocking && locks != this) {
        Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
      }

      // remove "this" from the owned locks list

      Monitor *prev = NULL;
      bool found = false;
      for (; locks != NULL; prev = locks, locks = locks->next()) {
        if (locks == this) {
          found = true;
          break;
        }
      }
      assert(found, "Removing a lock not owned");
      if (prev == NULL) {
        old_owner->_owned_locks = _next;
      } else {
        prev->_next = _next;
      }
      _next = NULL;
    #endif
  }
}


// Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
      thread->check_for_valid_safepoint_state(false);)
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT