synchronizer.cpp revision 10606:24c6f885d316
/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
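//
// For orientation, the stack-lock fast path that those emitters
// transliterate amounts to the following idiom (a minimal hedged sketch,
// not the emitted code itself; compare slow_enter() below):
//
//   markOop mark = obj->mark();
//   if (mark->is_neutral()) {                  // unlocked and unbiased
//     lock->set_displaced_header(mark);        // save the old header
//     if (Atomic::cmpxchg_ptr(lock, obj->mark_addr(), mark) == mark) {
//       return;  // acquired: the mark now points at the BasicLock
//     }
//   }
//   // else fall into the slow path (recursion, contention, inflation ...)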
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* notify, notifyAll or waited */ \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list. Monitors inflated by moribund threads are
// transferred here so they can still be scanned for deflation.
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
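//
// A caller-side sketch of the quick_* contract (hedged illustration; the
// real call sites are the runtime entry points used by the interpreter
// and by JIT-compiled code):
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition thread state, wrap obj in a Handle, and take the
//     // slow path, e.g. ObjectSynchronizer::notify(h_obj, CHECK);
//   }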

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify: the object is stack-locked by the caller, so by
    // definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code if the
// following function is changed. The implementation is extremely
// sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is null, the previous enter was a recursive enter: no-op.
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine handles the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine handles the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
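
// A hedged sketch of steps 1-4 above as VM-internal code (illustrative
// only; it assumes lock2 is already held so that wait() is legal, and
// real callers must also cope with pending exceptions):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // 1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // 2, 3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // 4
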
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

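// For context, jni_enter()/jni_exit() sit behind the JNI MonitorEnter and
// MonitorExit entry points. A hedged sketch of the user-visible side
// (native user code, not part of this file):
//
//   if (env->MonitorEnter(obj) == JNI_OK) {
//     ...                      // critical section
//     env->MonitorExit(obj);
//   }
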
// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
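
// Typical VM-internal usage is RAII-style (hedged sketch):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // fast_enter() in the constructor
//     ...                              // operate on h_obj while locked
//   }                                  // fast_exit() in the destructor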


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same
    // random value. On MP systems we'll have lots of RW access to a global,
    // so the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
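
// For reference, the thread-specific branch above is the classic 128-bit
// xorshift generator (Marsaglia, 2003). A self-contained sketch, with
// x/y/z/w standing in for the per-thread _hashState fields (hedged
// illustration only, not used by the code above):
//
//   static unsigned xorshift128(unsigned& x, unsigned& y, unsigned& z,
//                               unsigned& w) {
//     unsigned t = x;
//     t ^= t << 11;
//     x = y; y = z; z = w;
//     w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//     return w;
//   }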

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even though the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

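// Example of a MonitorClosure (hedged sketch): tally the monitors that
// are currently associated with an object.
//
//   class CountingClosure : public MonitorClosure {
//    public:
//     int _tally;
//     CountingClosure() : _tally(0) {}
//     void do_monitor(ObjectMonitor* mid) { _tally++; }
//   };
//
//   CountingClosure cc;
//   ObjectSynchronizer::monitors_iterate(&cc);
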
// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
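//
// For example (a hedged note; the MonitorBound flag defaults to 0, which
// disables the bound): running with -XX:MonitorBound=32768 requests an
// induced scavenge once the number of in-circulation monitors
// (gMonitorPopulation - gMonitorFreeCount) exceeds 32768.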

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.   Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);
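    // Worked example (hedged; assumes DEFAULT_CACHE_LINE_SIZE == 64):
    // if malloc returns 0x1008, align_size_up() rounds up to the next
    // 64-byte boundary, 0x1040, so element [0] begins on a cache line.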

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list; the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded numbers of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread destructor _after_ the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

1309ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1310                                           oop object,
1311                                           const InflateCause cause) {
1312
1313  // Inflate mutates the heap ...
1314  // Relaxing assertion for bug 6320749.
1315  assert(Universe::verify_in_progress() ||
1316         !SafepointSynchronize::is_at_safepoint(), "invariant");
1317
1318  EventJavaMonitorInflate event;
1319
1320  for (;;) {
1321    const markOop mark = object->mark();
1322    assert(!mark->has_bias_pattern(), "invariant");
1323
1324    // The mark can be in one of the following states:
1325    // *  Inflated     - just return
1326    // *  Stack-locked - coerce it to inflated
1327    // *  INFLATING    - busy wait for conversion to complete
1328    // *  Neutral      - aggressively inflate the object.
1329    // *  BIASED       - Illegal.  We should never see this
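    //
    // As a sketch of the low-order mark word bits (see markOop.hpp for the
    // authoritative layout; this rendering is illustrative only):
    //   neutral:      [ hash | age | 0 | 01 ]
    //   stack-locked: [ ptr to displaced header on owner's stack | 00 ]
    //   INFLATING:    all-zero (a transient "busy" value)
    //   inflated:     [ ptr to ObjectMonitor | 10 ]
    //   biased:       [ thread | epoch | age | 1 | 01 ]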
1330
1331    // CASE: inflated
1332    if (mark->has_monitor()) {
1333      ObjectMonitor * inf = mark->monitor();
1334      assert(inf->header()->is_neutral(), "invariant");
1335      assert(inf->object() == object, "invariant");
1336      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1337      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
1338      return inf;
1339    }
1340
1341    // CASE: inflation in progress - inflating over a stack-lock.
1342    // Some other thread is converting from stack-locked to inflated.
1343    // Only that thread can complete inflation -- other threads must wait.
1344    // The INFLATING value is transient.
1345    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1346    // We could always eliminate polling by parking the thread on some auxiliary list.
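    //
    // A minimal sketch of the polling idiom (ReadStableMark is defined
    // earlier in this file; the actual backoff mixes spinning, yielding
    // and parking):
    //   while (object->mark() == markOopDesc::INFLATING()) {
    //     SpinPause();    // back off, then re-poll the mark word
    //   }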
1347    if (mark == markOopDesc::INFLATING()) {
1348      TEVENT(Inflate: spin while INFLATING);
1349      ReadStableMark(object);
1350      continue;
1351    }
1352
1353    // CASE: stack-locked
1354    // Could be stack-locked either by this thread or by some other thread.
1355    //
1356    // Note that we allocate the objectmonitor speculatively, _before_ attempting
1357    // to install INFLATING into the mark word.  We originally installed INFLATING,
1358    // allocated the objectmonitor, and then finally STed the address of the
1359    // objectmonitor into the mark.  This was correct, but artificially lengthened
1360    // the interval in which INFLATING appeared in the mark, thus increasing
1361    // the odds of inflation contention.
1362    //
1363    // We now use per-thread private objectmonitor free lists.
1364    // These lists are reprovisioned from the global free list outside the
1365    // critical INFLATING...ST interval.  A thread can transfer
1366    // multiple objectmonitors en masse from the global free list to its local free list.
1367    // This reduces coherency traffic and lock contention on the global free list.
1368    // Using such local free lists, it doesn't matter if the omAlloc() call appears
1369    // before or after the CAS(INFLATING) operation.
1370    // See the comments in omAlloc().
1371
1372    if (mark->has_locker()) {
1373      ObjectMonitor * m = omAlloc(Self);
1374      // Optimistically prepare the objectmonitor - anticipate successful CAS
1375      // We do this before the CAS in order to minimize the length of time
1376      // in which INFLATING appears in the mark.
1377      m->Recycle();
1378      m->_Responsible  = NULL;
1379      m->_recursions   = 0;
1380      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1381
1382      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
1383      if (cmp != mark) {
1384        omRelease(Self, m, true);
1385        continue;       // Interference -- just retry
1386      }
1387
1388      // We've successfully installed INFLATING (0) into the mark-word.
1389      // This is the only case where 0 will appear in a mark-word.
1390      // Only the singular thread that successfully swings the mark-word
1391      // to 0 can perform (or more precisely, complete) inflation.
1392      //
1393      // Why do we CAS a 0 into the mark-word instead of just CASing the
1394      // mark-word from the stack-locked value directly to the new inflated state?
1395      // Consider what happens when a thread unlocks a stack-locked object.
1396      // It attempts to use CAS to swing the displaced header value from the
1397      // on-stack basiclock back into the object header.  Recall also that the
1398      // header value (hashcode, etc) can reside in (a) the object header, or
1399      // (b) a displaced header associated with the stack-lock, or (c) a displaced
1400      // header in an objectMonitor.  The inflate() routine must copy the header
1401      // value from the basiclock on the owner's stack to the objectMonitor, all
1402      // the while preserving the hashCode stability invariants.  If the owner
1403      // decides to release the lock while the value is 0, the unlock will fail
1404      // and control will eventually pass from slow_exit() to inflate.  The owner
1405      // will then spin, waiting for the 0 value to disappear.   Put another way,
1406      // the 0 causes the owner to stall if the owner happens to try to
1407      // drop the lock (restoring the header from the basiclock to the object)
1408      // while inflation is in-progress.  This protocol avoids races that
1409      // would otherwise permit hashCode values to change or "flicker" for an object.
1410      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1411      // 0 serves as a "BUSY" inflate-in-progress indicator.
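      //
      // An illustrative interleaving of the protocol described above:
      //   Owner thread                        Inflating thread
      //   ------------                        ----------------
      //                                       CAS(mark: stack-lock -> 0)
      //   CAS(mark: stack-lock -> dmw)
      //     fails -- mark is 0 -- so it       dmw = mark->displaced_mark_helper()
      //     spins via ReadStableMark()        m->set_header(dmw) ...
      //                                       release_set_mark(encode(m))
      //   observes the inflated mark and
      //   exits via ObjectMonitor::exit()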
1412
1413
1414      // Fetch the displaced mark from the owner's stack.
1415      // The owner can't die or unwind past the lock while our INFLATING
1416      // marker is in the mark word.  Furthermore the owner can't complete
1417      // an unlock on the object, either.
1418      markOop dmw = mark->displaced_mark_helper();
1419      assert(dmw->is_neutral(), "invariant");
1420
1421      // Setup monitor fields to proper values -- prepare the monitor
1422      m->set_header(dmw);
1423
1424      // Optimization: if the mark->locker stack address is associated
1425      // with this thread we could simply set m->_owner = Self.
1426      // Note that a thread can inflate an object
1427      // that it has stack-locked -- as might happen in wait() -- directly
1428      // with CAS.  That is, we can avoid the xchg-NULL ... ST idiom.
1429      m->set_owner(mark->locker());
1430      m->set_object(object);
1431      // TODO-FIXME: assert BasicLock->dhw != 0.
1432
1433      // Must preserve store ordering. The monitor state must
1434      // be stable at the time of publishing the monitor address.
1435      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1436      object->release_set_mark(markOopDesc::encode(m));
1437
1438      // Hopefully the performance counters are allocated on distinct cache lines
1439      // to avoid false sharing on MP systems ...
1440      OM_PERFDATA_OP(Inflations, inc());
1441      TEVENT(Inflate: overwrite stacklock);
1442      if (log_is_enabled(Debug, monitorinflation)) {
1443        if (object->is_instance()) {
1444          ResourceMark rm;
1445          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1446                                      p2i(object), p2i(object->mark()),
1447                                      object->klass()->external_name());
1448        }
1449      }
1450      if (event.should_commit()) {
1451        post_monitor_inflate_event(event, object, cause);
1452      }
1453      return m;
1454    }
1455
1456    // CASE: neutral
1457    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1458    // If we know we're inflating for entry it's better to inflate by swinging a
1459    // pre-locked objectMonitor pointer into the object header.   A successful
1460    // CAS inflates the object *and* confers ownership to the inflating thread.
1461    // In the current implementation we use a 2-step mechanism where we CAS()
1462    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1463    // An inflateTry() method that we could call from fast_enter() and slow_enter()
1464    // would be useful.
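    //
    // A hypothetical inflateTry() could look like the following sketch
    // (not implemented in this file; the shape is an assumption):
    //   ObjectMonitor * m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);
    //   m->set_owner(Self);         // pre-locked: a successful CAS confers ownership
    //   m->set_object(object);
    //   if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
    //     return m;                 // inflated *and* entered in one step
    //   }
    //   omRelease(Self, m, true);   // interference -- scrub the monitor and retry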
1465
1466    assert(mark->is_neutral(), "invariant");
1467    ObjectMonitor * m = omAlloc(Self);
1468    // prepare m for installation - set monitor to initial state
1469    m->Recycle();
1470    m->set_header(mark);
1471    m->set_owner(NULL);
1472    m->set_object(object);
1473    m->_recursions   = 0;
1474    m->_Responsible  = NULL;
1475    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1476
1477    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1478      m->set_object(NULL);
1479      m->set_owner(NULL);
1480      m->Recycle();
1481      omRelease(Self, m, true);
1482      m = NULL;
1483      // Interference -- the markword changed -- just retry.
1484      // The state-transitions are one-way, so there's no chance of
1485      // live-lock -- "Inflated" is an absorbing state.
1486      continue;
1487    }
1488
1489    // Hopefully the performance counters are allocated on distinct
1490    // cache lines to avoid false sharing on MP systems ...
1491    OM_PERFDATA_OP(Inflations, inc());
1492    TEVENT(Inflate: overwrite neutral);
1493    if (log_is_enabled(Debug, monitorinflation)) {
1494      if (object->is_instance()) {
1495        ResourceMark rm;
1496        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1497                                    p2i(object), p2i(object->mark()),
1498                                    object->klass()->external_name());
1499      }
1500    }
1501    if (event.should_commit()) {
1502      post_monitor_inflate_event(event, object, cause);
1503    }
1504    return m;
1505  }
1506}
1507
1508
1509// deflate_idle_monitors() is called at all safepoints, immediately
1510// after all mutators are stopped, but before any objects have moved.
1511// It traverses the list of known monitors, deflating where possible.
1512// The scavenged monitors are returned to the monitor free list.
1513//
1514// Beware that we scavenge at *every* stop-the-world point.
1515// Having a large number of monitors in-circulation negatively
1516// impacts the performance of some applications (e.g., PointBase).
1517// Broadly, we want to minimize the # of monitors in circulation.
1518//
1519// We have added a flag, MonitorInUseLists, which creates a list
1520// of active monitors for each thread. deflate_idle_monitors()
1521// only scans the per-thread in-use lists. omAlloc() puts all
1522// assigned monitors on the per-thread list. deflate_idle_monitors()
1523// returns the non-busy monitors to the global free list.
1524// When a thread dies, omFlush() adds the list of active monitors for
1525// that thread to a global gOmInUseList, acquiring the
1526// global list lock. deflate_idle_monitors() acquires the global
1527// list lock to scan gOmInUseList and move non-busy monitors to the global free list.
1528// An alternative could have used a single global in-use list. The
1529// downside would have been the additional cost of acquiring the global list lock
1530// for every omAlloc().
1531//
1532// Perversely, the heap size -- and thus the STW safepoint rate --
1533// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1534// which in turn can mean large(r) numbers of objectmonitors in circulation.
1535// This is an unfortunate aspect of this design.
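//
// Per-thread in-use list shape as used below (a sketch; the links are the
// monitors' FreeNext fields, the lengths are omInUseCount/gOmInUseCount):
//   JavaThread::omInUseList -> OM -> OM -> ... -> NULL
//   gOmInUseList            -> OM -> OM -> ... -> NULL    (moribund threads)
// deflate_idle_monitors() unlinks the non-busy monitors from these lists
// and splices them onto gFreeList with a constant-time prepend.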
1536
1537enum ManifestConstants {
1538  ClearResponsibleAtSTW = 0
1539};
1540
1541// Deflate a single monitor if not in-use
1542// Return true if deflated, false if in-use
1543bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1544                                         ObjectMonitor** freeHeadp,
1545                                         ObjectMonitor** freeTailp) {
1546  bool deflated;
1547  // Normal case ... The monitor is associated with obj.
1548  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1549  guarantee(mid == obj->mark()->monitor(), "invariant");
1550  guarantee(mid->header()->is_neutral(), "invariant");
1551
1552  if (mid->is_busy()) {
1553    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1554    deflated = false;
1555  } else {
1556    // Deflate the monitor if it is no longer being used
1557    // It's idle - scavenge and return to the global free list
1558    // plain old deflation ...
1559    TEVENT(deflate_idle_monitors - scavenge1);
1560    if (log_is_enabled(Debug, monitorinflation)) {
1561      if (obj->is_instance()) {
1562        ResourceMark rm;
1563        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1564                                    "mark " INTPTR_FORMAT " , type %s",
1565                                    p2i(obj), p2i(obj->mark()),
1566                                    obj->klass()->external_name());
1567      }
1568    }
1569
1570    // Restore the header back to obj
1571    obj->release_set_mark(mid->header());
1572    mid->clear();
1573
1574    assert(mid->object() == NULL, "invariant");
1575
1576    // Move the object to the working free list defined by freeHeadp, freeTailp
1577    if (*freeHeadp == NULL) *freeHeadp = mid;
1578    if (*freeTailp != NULL) {
1579      ObjectMonitor * prevtail = *freeTailp;
1580      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1581      prevtail->FreeNext = mid;
1582    }
1583    *freeTailp = mid;
1584    deflated = true;
1585  }
1586  return deflated;
1587}
1588
1589// Walk a given monitor list and deflate idle monitors.
1590// The given list could be a per-thread list or a global list.
1591// The caller acquires gListLock.
1592int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1593                                             ObjectMonitor** freeHeadp,
1594                                             ObjectMonitor** freeTailp) {
1595  ObjectMonitor* mid;
1596  ObjectMonitor* next;
1597  ObjectMonitor* cur_mid_in_use = NULL;
1598  int deflated_count = 0;
1599
1600  for (mid = *listHeadp; mid != NULL;) {
1601    oop obj = (oop) mid->object();
1602    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
1603      // if deflate_monitor succeeded,
1604      // extract from per-thread in-use list
1605      if (mid == *listHeadp) {
1606        *listHeadp = mid->FreeNext;
1607      } else if (cur_mid_in_use != NULL) {
1608        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1609      }
1610      next = mid->FreeNext;
1611      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list
1612      mid = next;
1613      deflated_count++;
1614    } else {
1615      cur_mid_in_use = mid;
1616      mid = mid->FreeNext;
1617    }
1618  }
1619  return deflated_count;
1620}
1621
1622void ObjectSynchronizer::deflate_idle_monitors() {
1623  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1624  int nInuse = 0;              // currently associated with objects
1625  int nInCirculation = 0;      // extant
1626  int nScavenged = 0;          // reclaimed
1627  bool deflated = false;
1628
1629  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1630  ObjectMonitor * freeTailp = NULL;
1631
1632  TEVENT(deflate_idle_monitors);
1633  // Prevent omFlush from changing mids in Thread dtors during deflation,
1634  // and in case the VM thread is acquiring a lock during a safepoint.
1635  // See e.g. 6320749.
1636  Thread::muxAcquire(&gListLock, "scavenge - return");
1637
1638  if (MonitorInUseLists) {
1639    int inUse = 0;
1640    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1641      nInCirculation += cur->omInUseCount;
1642      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
1643      cur->omInUseCount -= deflated_count;
1644      if (ObjectMonitor::Knob_VerifyInUse) {
1645        verifyInUse(cur);
1646      }
1647      nScavenged += deflated_count;
1648      nInuse += cur->omInUseCount;
1649    }
1650
1651    // For moribund threads, scan gOmInUseList
1652    if (gOmInUseList) {
1653      nInCirculation += gOmInUseCount;
1654      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1655      gOmInUseCount -= deflated_count;
1656      nScavenged += deflated_count;
1657      nInuse += gOmInUseCount;
1658    }
1659
1660  } else {
1661    PaddedEnd<ObjectMonitor> * block =
1662      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1663    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1664      // Iterate over all extant monitors -- scavenge all idle monitors.
1665      assert(block->object() == CHAINMARKER, "must be a block header");
1666      nInCirculation += _BLOCKSIZE;
1667      for (int i = 1; i < _BLOCKSIZE; i++) {
1668        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1669        oop obj = (oop)mid->object();
1670
1671        if (obj == NULL) {
1672          // The monitor is not associated with an object.
1673          // The monitor should be on either a thread-specific private
1674          // free list or the global free list.
1675          // obj == NULL IMPLIES mid->is_busy() == 0
1676          guarantee(!mid->is_busy(), "invariant");
1677          continue;
1678        }
1679        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
1680
1681        if (deflated) {
1682          mid->FreeNext = NULL;
1683          nScavenged++;
1684        } else {
1685          nInuse++;
1686        }
1687      }
1688    }
1689  }
1690
1691  gMonitorFreeCount += nScavenged;
1692
1693  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.
1694
1695  if (ObjectMonitor::Knob_Verbose) {
1696    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
1697                  "ForceMonitorScavenge=%d : pop=%d free=%d",
1698                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1699                  gMonitorPopulation, gMonitorFreeCount);
1700    tty->flush();
1701  }
1702
1703  ForceMonitorScavenge = 0;    // Reset
1704
1705  // Move the scavenged monitors back to the global free list.
1706  if (freeHeadp != NULL) {
1707    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
1708    assert(freeTailp->FreeNext == NULL, "invariant");
1709    // constant-time list splice - prepend scavenged segment to gFreeList
1710    freeTailp->FreeNext = gFreeList;
1711    gFreeList = freeHeadp;
1712  }
1713  Thread::muxRelease(&gListLock);
1714
1715  OM_PERFDATA_OP(Deflations, inc(nScavenged));
1716  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
1717
1718  // TODO: Add objectMonitor leak detection.
1719  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1720  GVars.stwRandom = os::random();
1721  GVars.stwCycle++;
1722}
1723
1724// Monitor cleanup on JavaThread::exit
1725
1726// Iterates through the monitor cache and attempts to release the thread's
1727// monitors.  Gives up on a particular monitor if an exception occurs, but
1728// continues the overall iteration, swallowing the exception.
1729class ReleaseJavaMonitorsClosure: public MonitorClosure {
1730 private:
1731  TRAPS;
1732
1733 public:
1734  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1735  void do_monitor(ObjectMonitor* mid) {
1736    if (mid->owner() == THREAD) {
1737      if (ObjectMonitor::Knob_VerifyMatch != 0) {
1738        Handle obj((oop) mid->object());
1739        tty->print("INFO: unexpected locked object:");
1740        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
1741        fatal("exiting JavaThread=" INTPTR_FORMAT
1742              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
1743              p2i(THREAD), p2i(mid));
1744      }
1745      (void)mid->complete_exit(CHECK);
1746    }
1747  }
1748};
1749
1750// Release all inflated monitors owned by THREAD.  Lightweight monitors are
1751// ignored.  This is meant to be called during JNI thread detach, which assumes
1752// all remaining monitors are heavyweight.  All exceptions are swallowed.
1753// Scanning the extant monitor list can be time consuming.
1754// A simple optimization is to add a per-thread flag that indicates a thread
1755// called jni_monitorenter() during its lifetime.
1756//
1757// Instead of a NoSafepointVerifier it might be cheaper to
1758// use an idiom of the form:
1759//   int tmp = SafepointSynchronize::_safepoint_counter;
1760//   <code that must not run at safepoint>
1761//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
1762// Since the tests are extremely cheap we could leave them enabled
1763// for normal product builds.
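//
// (The guarantee in the idiom above assumes _safepoint_counter's encoding:
// the counter is incremented when a safepoint begins and again when it ends,
// so an odd value means a safepoint is in progress, and any change in the
// value means a safepoint intervened.)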
1764
1765void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1766  assert(THREAD == JavaThread::current(), "must be current Java thread");
1767  NoSafepointVerifier nsv;
1768  ReleaseJavaMonitorsClosure rjmc(THREAD);
1769  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
1770  ObjectSynchronizer::monitors_iterate(&rjmc);
1771  Thread::muxRelease(&gListLock);
1772  THREAD->clear_pending_exception();
1773}
1774
1775const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1776  switch (cause) {
1777    case inflate_cause_vm_internal:    return "VM Internal";
1778    case inflate_cause_monitor_enter:  return "Monitor Enter";
1779    case inflate_cause_wait:           return "Monitor Wait";
1780    case inflate_cause_notify:         return "Monitor Notify";
1781    case inflate_cause_hash_code:      return "Monitor Hash Code";
1782    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
1783    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
1784    default:
1785      ShouldNotReachHere();
1786  }
1787  return "Unknown";
1788}
1789
1790static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
1791                                       const oop obj,
1792                                       const ObjectSynchronizer::InflateCause cause) {
1793#if INCLUDE_TRACE
1794  assert(event.should_commit(), "check outside");
1795  event.set_klass(obj->klass());
1796  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
1797  event.set_cause((u1)cause);
1798  event.commit();
1799#endif
1800}
1801
1802//------------------------------------------------------------------------------
1803// Debugging code
1804
1805void ObjectSynchronizer::sanity_checks(const bool verbose,
1806                                       const uint cache_line_size,
1807                                       int *error_cnt_ptr,
1808                                       int *warning_cnt_ptr) {
1809  u_char *addr_begin      = (u_char*)&GVars;
1810  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
1811  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1812
1813  if (verbose) {
1814    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1815                  sizeof(SharedGlobals));
1816  }
1817
1818  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1819  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1820
1821  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1822  if (verbose) {
1823    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
1824  }
1825
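  // The intended layout, as a sketch (each hot field should sit at least one
  // cache line away from its neighbor and from the struct boundaries):
  //   SharedGlobals: [ pad | stwRandom, stwCycle | pad | hcSequence | pad ]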
1826  if (cache_line_size != 0) {
1827    // We were able to determine the L1 data cache line size so
1828    // do some cache line specific sanity checks
1829
1830    if (offset_stwRandom < cache_line_size) {
1831      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
1832                    "to the struct beginning than a cache line, which permits "
1833                    "false sharing.");
1834      (*warning_cnt_ptr)++;
1835    }
1836
1837    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
1838      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
1839                    "SharedGlobals.hcSequence fields are closer than a cache "
1840                    "line, which permits false sharing.");
1841      (*warning_cnt_ptr)++;
1842    }
1843
1844    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1845      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1846                    "to the struct end than a cache line, which permits false "
1847                    "sharing.");
1848      (*warning_cnt_ptr)++;
1849    }
1850  }
1851}
1852
1853#ifndef PRODUCT
1854
1855// Verify all monitors in the monitor cache; the verification is weak.
1856void ObjectSynchronizer::verify() {
1857  PaddedEnd<ObjectMonitor> * block =
1858    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1859  while (block != NULL) {
1860    assert(block->object() == CHAINMARKER, "must be a block header");
1861    for (int i = 1; i < _BLOCKSIZE; i++) {
1862      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1863      oop object = (oop)mid->object();
1864      if (object != NULL) {
1865        mid->verify();
1866      }
1867    }
1868    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1869  }
1870}
1871
1872// Check if monitor belongs to the monitor cache
1873// The list is grow-only so it's *relatively* safe to traverse
1874// the list of extant blocks without taking a lock.
1875
1876int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1877  PaddedEnd<ObjectMonitor> * block =
1878    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1879  while (block != NULL) {
1880    assert(block->object() == CHAINMARKER, "must be a block header");
1881    if (monitor > (ObjectMonitor *)&block[0] &&
1882        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1883      address mon = (address)monitor;
1884      address blk = (address)block;
1885      size_t diff = mon - blk;
1886      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
1887      return 1;
1888    }
1889    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1890  }
1891  return 0;
1892}
1893
1894#endif
1895