
/*
 * Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutex.cpp.incl"

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
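//
//   An illustrative decode of that encoding -- a sketch only, assuming the
//   usual layout in which the low-order bit of the word (_LBIT) doubles as
//   the set bit of the LockByte:
//
//     intptr_t w = _LockWord.FullWord ;
//     bool        Locked = (w & _LBIT) != 0 ;           // LockByte set?
//     ParkEvent * Head   = (ParkEvent *)(w & ~_LBIT) ;  // cxq head, or NULL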
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff" succession.)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
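//
//   A hedged sketch of that remedy -- hypothetical, as no AcquireCounter
//   field exists in this Monitor, and K is an invented constant:
//
//     if (--_AcquireCounter <= 0) {   // K acquisitions have elapsed
//       _AcquireCounter = K ;         // reset the abdication interval
//       // abdicate: hand the lock to the EntryList head and requeue
//       // Self at the tail of the EntryList ...
//     }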
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   --  For a given monitor there can be at most one OnDeck thread at any given
//       instant.  The OnDeck thread is contending for the lock, but has been
//       unlinked from the EntryList and cxq by some previous unlock() operations.
//       Once a thread has been designated the OnDeck thread it will remain so
//       until it manages to acquire the lock -- being OnDeck is a stable property.
//   --  Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//       having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//       OnDeck by CASing the field from null to non-null.  If successful, that thread
//       is then responsible for progress and succession and can use CAS to detach and
//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//       we allow multiple concurrent "push" operations but restrict detach concurrency
//       to at most one thread.  Having selected and detached a successor, the thread then
//       changes the OnDeck to refer to that successor, and then unparks the successor.
//       That successor will eventually acquire the lock and clear OnDeck.  Beware
//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//       and then the successor eventually "drops" OnDeck.  Note that there's never
//       any sense of contention on the inner lock, however.  Threads never contend
//       or wait for the inner lock.
//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
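//
//       The asymmetric inner-lock protocol above, as a condensed sketch of
//       the real code in IUnlock(), below:
//
//         if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) return ;  // busy
//         ... detach the cxq with CAS; merge the RATs into the EntryList ...
//         _OnDeck = w ;     // pass the inner lock to the successor w
//         w->unpark() ;     // w clears OnDeck once it takes the outer lock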
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
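//
//   A minimal sketch of that proposal -- hypothetical, as Thread keeps no
//   such free list today; _EventFreeList and FreeNext are invented names:
//
//     ParkEvent * ev = Self->_EventFreeList ;       // pop from local cache
//     if (ev != NULL) Self->_EventFreeList = ev->FreeNext ;
//     else ev = ParkEvent::Allocate (Self) ;        // cache miss
//     ... enqueue and park() on ev ...
//     ev->FreeNext = Self->_EventFreeList ;         // push back when done
//     Self->_EventFreeList = ev ;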
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

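// Illustrative CASPTR usage -- a sketch, not part of the original file.
// CASPTR returns the value it observed at the address, so observing the
// expected comparand means the exchange succeeded:
//
//   if (CASPTR (&_LockWord, 0, _LBIT) == 0) { /* lock acquired */ }
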
// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

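// Stall() -- burn approximately "its" iterations in the RNG so the delay
// can't be optimized away; used by TrySpin() when no Thread is available.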
static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

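// TryLock() -- attempt to set the LockByte via CAS, retrying only when the
// CAS fails because unrelated bits of the _LockWord changed underneath us.
// Returns 1 if the lock was acquired, 0 if the LockByte is already set.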
int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

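// ILocked() -- returns 1 iff the LockByte is currently set, i.e., the outer
// lock is held by some thread.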
int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work well
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock())    return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
     timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

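// AcquireOrPush() -- in a single CAS loop, either acquire the outer lock
// (returns 1) or prepend ESelf onto the cxq (returns 0).  Because the
// LockByte and the cxq head share the _LockWord, one CAS decides the race.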
inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT.
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before_ having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  _LockWord.Bytes[_LSBINDEX] = 0 ;       // drop outer lock
  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is_ stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
                            // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before_ it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never_ find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {      // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                  // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where mutators were known to be blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant for eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}


// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after_ the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-Java threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

  #ifdef ASSERT
    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
    assert(least != this, "Specification of get_least_... call above");
    if (least != NULL && least->rank() <= special) {
      tty->print("Attempting to wait on monitor %s/%d while holding"
                 " lock %s/%d -- possible deadlock",
                 name(), rank(), least->name(), least->rank());
      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
    }
  #endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;          // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner             = NULL ;
  m->_snuck             = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList         = NULL ;
  m->_OnDeck            = NULL ;
  m->_WaitSet           = NULL ;
  m->_WaitLock[0]       = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
                    // Mutex::set_owner_implementation is a friend of Thread

      assert(this->rank() >= 0, "bad lock rank");

      if (LogMultipleMutexLocking && locks != NULL) {
        Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
      }

      // Deadlock avoidance rules require us to acquire Mutexes only in
      // a global total order. For example, if m1 is the lowest ranked mutex
      // that the thread holds and m2 is the mutex the thread is trying
      // to acquire, then deadlock avoidance rules require that the rank
      // of m2 be less than the rank of m1.
      // The rank Mutex::native is an exception in that it is not subject
      // to the verification rules.
      // Here are some further notes relating to mutex acquisition anomalies:
      // . under Solaris, the interrupt lock gets acquired when doing
      //   profiling, so any lock could be held.
      // . it is also ok to acquire Safepoint_lock at the very end while we
      //   already hold Terminator_lock - may happen because of periodic safepoints
      if (this->rank() != Mutex::native &&
          this->rank() != Mutex::suspend_resume &&
          locks != NULL && locks->rank() <= this->rank() &&
          !SafepointSynchronize::is_at_safepoint() &&
          this != Interrupt_lock && this != ProfileVM_lock &&
          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
            SafepointSynchronize::is_synchronizing())) {
        new_owner->print_owned_locks();
        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                      "possible deadlock", this->name(), this->rank(),
                      locks->name(), locks->rank()));
      }

      this->_next = new_owner->_owned_locks;
      new_owner->_owned_locks = this;
    #endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of a mutex owned by another thread");

    _owner = NULL; // set the owner

    #ifdef ASSERT
      Monitor *locks = old_owner->owned_locks();

      if (LogMultipleMutexLocking && locks != this) {
        Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
      }

      // remove "this" from the owned locks list

      Monitor *prev = NULL;
      bool found = false;
      for (; locks != NULL; prev = locks, locks = locks->next()) {
        if (locks == this) {
          found = true;
          break;
        }
      }
      assert(found, "Removing a lock not owned");
      if (prev == NULL) {
        old_owner->_owned_locks = _next;
      } else {
        prev->_next = _next;
      }
      _next = NULL;
    #endif
  }
}


// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
      thread->check_for_valid_safepoint_state(false);)
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT