// mutex.cpp revision 6646:b596a1063e90
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero. Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq. Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms. We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS. If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
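// For orientation, a minimal sketch of the _LockWord encoding described
// above. The real SplitWord union lives in mutex.hpp; the fragment below is
// illustrative only and assumes a layout in which the LockByte is the
// separately addressable low-order byte:
//
//   union SplitWord {                          // see mutex.hpp (abridged)
//     volatile intptr_t FullWord ;             // (cxq head pointer | LockByte)
//     volatile jbyte    Bytes [sizeof(intptr_t)] ;
//   } ;
//
//   // Uncontended acquire: CAS the LockByte from 0 to non-zero:
//   //   if (CASPTR (&_LockWord, 0, _LBIT) == 0) { /* we own the lock */ }
//   // Uncontended release: ST 0 into the LockByte followed by MEMBAR
//   // (the CAS:MEMBAR idiom) rather than a closing CAS (the CAS:0 idiom).
//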
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path. (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor. The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread. (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness. If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor. After a thread acquires
//   the lock it will decrement the AcquireCounter field. When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList. (A minimal sketch of this remedy appears below.)
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern. Recall, too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
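// A minimal sketch of the AcquireCounter remedy described above. Nothing
// here exists in the implementation: the _AcquireCounter field, the
// kAcquireBudget constant, and AbdicateTo() are all hypothetical, and the
// requeue-at-tail step is elided:
//
//   void Monitor::FairnessEpilog() {        // hypothetical post-acquire hook
//     if (--_AcquireCounter > 0) return ;   // common case: retain the lock
//     _AcquireCounter = kAcquireBudget ;    // reset the budget, then ...
//     AbdicateTo (_EntryList) ;             // ... hand off to the EntryList head
//     // ... and move self to the tail of the EntryList, trading peak
//     // throughput for short-term fairness.
//   }
//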
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread. This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList form a single
//   logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list. If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely. Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant. The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null. If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList. By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread. (A condensed view of the push/detach idioms follows
//      this comment block.) Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck. Beware
//      that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck. Note that there's never
//      any sense of contention on the inner lock, however. Threads never contend
//      or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet. Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
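//
// A condensed, illustrative view of the "many pushers, one detacher"
// discipline described above. Both idioms appear in full later in this
// file (AcquireOrPush() and IUnlock()); only the two CASes matter here:
//
//   // lock() side: any number of threads may race to push onto the cxq
//   //   ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
//   //   CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
//
//   // unlock() side: only the holder of the OnDeck inner lock may detach
//   // the accumulated RATs and drain them into the EntryList
//   //   CASPTR (&_LockWord, cxq, cxq & _LBIT) ;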
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse. (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes. The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction. See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark. Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark. The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand. The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release(). A thread would simply pop an element from the local stack before it
//   enqueued or park()ed. When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes. That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor. While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way. Melding
//   them together was facile -- a bit too facile. The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}
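
// For reference, a trivial illustration of the CASPTR() macro defined above,
// which maps the canonical (address, comparand, set) argument order onto
// Atomic::cmpxchg_ptr's (exchange_value, dest, compare_value) order. The
// local word "w" is hypothetical:
//
//   intptr_t w = 0 ;
//   intptr_t prior = CASPTR (&w, 0, 1) ;   // canonical order: (a, c, s)
//   // On success prior == 0 and w == 1; on failure prior holds the
//   // current value of w and w is unchanged.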
// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless. (At worst it'll slightly retard
// acquisition times). The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock()) return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}
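
// Worked example of the bounded exponential backoff in TrySpin() above:
// every 8th probe Delay advances as Delay = ((Delay << 1)|1) & 0x7FF, i.e.
//   0 -> 1 -> 3 -> 7 -> 15 -> ... -> 1023 -> 2047 -> 2047 -> ...
// The delay roughly doubles each period and clamps at 0x7FF (2047)
// iteration units, while the total number of probes remains bounded by
// NativeMonitorSpinLimit.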
// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions. The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock.
  // Install ESelf -- the ParkEvent, not the Thread -- so that the
  // _OnDeck == ESelf invariant in OnDeck_LOOP below holds.
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock. Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible. That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.
  // As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock. In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession. The LSBit of
    // OnDeck allows us to discriminate two cases. If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive. This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation. Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock. In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it. Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck. That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;     // pass OnDeck to w.
    // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable. In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable. For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above. Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority. See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock. Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq. This could be done more efficiently with a single bulk transfer en masse,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters. Let's say that the
// waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}
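
// A minimal usage sketch of the native Monitor API implemented in this file
// (illustrative only, excluded from the build). The monitor instance, its
// rank, and the work_pending()/take_work()/post_work() helpers are all
// hypothetical:
#if 0
static Monitor* Work_hypothetical_lock = new Monitor(Mutex::leaf, "Work_hypothetical", true);

void consumer() {
  Work_hypothetical_lock->lock();           // sets _owner; may park if contended
  while (!work_pending()) {                 // guard: tolerate spurious wakeups
    Work_hypothetical_lock->wait(true /* no_safepoint_check */);
  }
  take_work();
  Work_hypothetical_lock->unlock();
}

void producer() {
  Work_hypothetical_lock->lock();
  post_work();
  Work_hypothetical_lock->notify();         // move one waiter to the cxq
  Work_hypothetical_lock->unlock();         // wakee unparked via succession
}
#endif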
int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet. Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class. The WaitSet would be composed of WaitEvents. Only the
  // owner of the outer lock would manipulate the WaitSet. A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock. More precisely,
  // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
  // and then finds *itself* on the cxq. During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;        // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {          // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                      // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;            // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout. ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity. It's possible that the need for sneaking could be obviated
// as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and then
// (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section. The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where the mutators were known to be blocked at the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking. We'd
// decouple lock acquisition and parking. The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away. Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on. we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horrible dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}

// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
// over a pthread_mutex_t would work equally well, but require more platform-specific
// code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available. Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention. That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list. This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test. I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {                     // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

  #ifdef ASSERT
    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
    assert(least != this, "Specification of get_least_... call above");
    if (least != NULL && least->rank() <= special) {
      tty->print("Attempting to wait on monitor %s/%d while holding"
                 " lock %s/%d -- possible deadlock",
                 name(), rank(), least->name(), least->rank());
      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
    }
  #endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;         // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck    = NULL ;
  m->_WaitSet   = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
      // Mutex::set_owner_implementation is a friend of Thread

      assert(this->rank() >= 0, "bad lock rank");

      // Deadlock avoidance rules require us to acquire Mutexes only in
      // a global total order. For example, if m1 is the lowest ranked mutex
      // that the thread holds and m2 is the mutex the thread is trying
      // to acquire, then deadlock avoidance rules require that the rank
      // of m2 be less than the rank of m1.
      // The rank Mutex::native is an exception in that it is not subject
      // to the verification rules.
      // Here are some further notes relating to mutex acquisition anomalies:
      // . under Solaris, the interrupt lock gets acquired when doing
      //   profiling, so any lock could be held.
      // . it is also ok to acquire Safepoint_lock at the very end while we
      //   already hold Terminator_lock - may happen because of periodic safepoints
      if (this->rank() != Mutex::native &&
          this->rank() != Mutex::suspend_resume &&
          locks != NULL && locks->rank() <= this->rank() &&
          !SafepointSynchronize::is_at_safepoint() &&
          this != Interrupt_lock && this != ProfileVM_lock &&
          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
            SafepointSynchronize::is_synchronizing())) {
        new_owner->print_owned_locks();
        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                      "possible deadlock", this->name(), this->rank(),
                      locks->name(), locks->rank()));
      }

      this->_next = new_owner->_owned_locks;
      new_owner->_owned_locks = this;
    #endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

    #ifdef ASSERT
      Monitor *locks = old_owner->owned_locks();

      // remove "this" from the owned locks list

      Monitor *prev = NULL;
      bool found = false;
      for (; locks != NULL; prev = locks, locks = locks->next()) {
        if (locks == this) {
          found = true;
          break;
        }
      }
      assert(found, "Removing a lock not owned");
      if (prev == NULL) {
        old_owner->_owned_locks = _next;
      } else {
        prev->_next = _next;
      }
      _next = NULL;
    #endif
  }
}


// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
           "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT
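
// A minimal sketch (illustrative only, excluded from the build) of the
// rank-ordering discipline enforced by set_owner_implementation() above:
// a thread may acquire a lock only if its rank is strictly below the rank
// of every lock it already holds (Mutex::native excepted). The lock names
// here are hypothetical:
#if 0
static Mutex* Outer_hypothetical_lock = new Mutex(Mutex::nonleaf, "Outer_hypothetical", true);
static Mutex* Inner_hypothetical_lock = new Mutex(Mutex::leaf,    "Inner_hypothetical", true);

void ordered_acquire() {
  Outer_hypothetical_lock->lock();   // least rank held is now nonleaf
  Inner_hypothetical_lock->lock();   // OK: leaf < nonleaf
  Inner_hypothetical_lock->unlock();
  Outer_hypothetical_lock->unlock();
}

void inverted_acquire() {
  Inner_hypothetical_lock->lock();   // least rank held is now leaf
  Outer_hypothetical_lock->lock();   // fatal(): nonleaf >= leaf -- possible deadlock
}
#endif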