hotspot/src/share/vm/runtime/objectMonitor.cpp
changeset 6975 dc9b63952682
/*
 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_objectMonitor.cpp.incl"

#if defined(__GNUC__) && !defined(IA64)
  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif


#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)    {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)          {;}

#endif // ndef DTRACE_ENABLED
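
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The same
// compile-time-gated probe pattern as above, reduced to portable C++: when
// tracing is compiled out the macro collapses to an empty statement, so call
// sites need no #ifdefs of their own.  EXAMPLE_TRACE_ENABLED,
// ExampleTraceProbes and EXAMPLE_MONITOR_PROBE are hypothetical stand-ins for
// the HS_DTRACE_* machinery, not HotSpot APIs.

#include <cstdio>

static bool ExampleTraceProbes = true ;   // runtime gate, like DTraceMonitorProbes

#ifdef EXAMPLE_TRACE_ENABLED
#define EXAMPLE_MONITOR_PROBE(name, mon, tid)                              \
  {                                                                        \
    if (ExampleTraceProbes) {                                              \
      std::printf ("monitor__%s mon=%p tid=%ld\n", (name),                 \
                   (const void *)(mon), (long)(tid)) ;                     \
    }                                                                      \
  }
#else
// Disabled build: probe sites compile to nothing.
#define EXAMPLE_MONITOR_PROBE(name, mon, tid) {;}
#endif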
       
// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_Verbose    = 0 ;
int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
static int Knob_HandOff            = 0 ;
static int Knob_ReportSettings     = 0 ;

static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
static int Knob_SpinEarly          = 1 ;
static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
static int Knob_Bonus              = 100 ;     // spin success bonus
static int Knob_BonusB             = 100 ;     // spin success bonus
static int Knob_Penalty            = 200 ;     // spin failure penalty
static int Knob_Poverty            = 1000 ;
static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
static int Knob_FixedSpin          = 0 ;
static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
static int Knob_UsePause           = 1 ;
static int Knob_ExitPolicy         = 0 ;
static int Knob_PreSpin            = 10 ;      // 20-100 likely better
static int Knob_ResetEvent         = 0 ;
static int BackOffMask             = 0 ;

static int Knob_FastHSSEC          = 0 ;
static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
static volatile int InitDone       = 0 ;
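
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  One way to realize
// the __read_mostly suggestion above with GCC: place the effectively-final
// knobs in a dedicated ELF section so they don't share cache lines with
// frequently written data.  The ".data.read_mostly" section name follows the
// Linux-kernel convention and is an assumption here, as is the
// EXAMPLE_READ_MOSTLY macro name.
#if defined(__GNUC__)
  #define EXAMPLE_READ_MOSTLY __attribute__((section(".data.read_mostly")))
#else
  #define EXAMPLE_READ_MOSTLY
#endif
// Usage sketch:  static int EXAMPLE_READ_MOSTLY Knob_Something = 0 ;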
       
#define TrySpin TrySpin_VaryDuration

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.  (A standalone sketch of this push/drain
//   discipline appears just after this comment block.)
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.   The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute a single
//   logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilog) and
//   to reduce heat on the list ends.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned on 256-byte address boundaries.  A thread
//   would try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's true
//   on most common processors.  (A reduced sketch of this encoding also appears
//   after this comment block.)
//
// * See also http://blogs.sun.com/dave
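
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The cxq push/drain
// discipline described above, reduced to standalone C++11 atomics so it can be
// read (and compiled) in isolation.  ExampleNode, example_push_cxq and
// example_detach_cxq are hypothetical names; parking, ownership and the
// EntryList folding step are omitted.

#include <atomic>

struct ExampleNode {
  ExampleNode * next ;
};

static std::atomic<ExampleNode *> example_cxq (nullptr) ;

// Contending threads push themselves with CAS.  Racing pushers force the
// list to be a singly-linked LIFO, exactly as the comment block explains.
static void example_push_cxq (ExampleNode * self) {
  ExampleNode * head = example_cxq.load (std::memory_order_relaxed) ;
  do {
    self->next = head ;   // head is refreshed by each failed CAS below
  } while (!example_cxq.compare_exchange_weak (head, self,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) ;
}

// Only the lock owner detaches: a single atomic exchange installs null and
// takes the whole chain.  One detacher racing many pushers is what keeps the
// scheme ABA-oblivious -- a reappearing head value cannot corrupt a push.
static ExampleNode * example_detach_cxq () {
  return example_cxq.exchange (nullptr, std::memory_order_acquire) ;
}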
       
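
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The (List,LockByte)
// encoding floated above, under its stated assumptions: nodes aligned on
// 256-byte boundaries leave the low 8 bits of the word free for a lock byte,
// so exit can be a plain 1-0 byte store while enter still uses full-word CAS.
// Polarity here is low-byte 1 == locked (store 0 to release); the byte overlay
// assumes a little-endian machine where byte stores are coherent with
// full-word CAS, and all names are hypothetical.

#include <atomic>
#include <cstdint>

static std::atomic<uintptr_t> example_word (0) ;   // high bits: list head

static bool example_bytelock_try_lock () {
  uintptr_t v = example_word.load (std::memory_order_relaxed) ;
  if (v & 0xFF) return false ;                     // test: already owned
  return example_word.compare_exchange_strong (v, v | 1,
                                               std::memory_order_acquire) ;
}

static void example_bytelock_unlock () {
  // STB-style 1-0 release: a plain store to the low-order byte -- no CAS and
  // no full fence.  (Machine-level idiom; strict ISO C++ frowns on this cast.)
  reinterpret_cast<std::atomic<uint8_t> *>(&example_word)
      ->store (0, std::memory_order_release) ;
}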
       
// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;
       _recursions = 1 ;
       OwnerIsThread = 1 ;
       return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}
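
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The try_enter() fast
// path in miniature: CAS an owner field from "none" to self, or bump a
// recursion count on an owner hit.  std::thread-based and all names are
// hypothetical; the stack-locked (is_lock_owned) case has no analogue here.

#include <atomic>
#include <thread>

class ExampleRecursiveTryLock {
  std::atomic<std::thread::id> _owner ;
  int _recursions ;
 public:
  ExampleRecursiveTryLock () : _owner (std::thread::id()), _recursions (0) {}

  bool try_enter () {
    const std::thread::id self = std::this_thread::get_id () ;
    if (_owner.load (std::memory_order_relaxed) == self) {
      _recursions ++ ;                 // recursive re-entry: already exclusive
      return true ;
    }
    std::thread::id none ;             // default-constructed id == "no owner"
    return _owner.compare_exchange_strong (none, self,
                                           std::memory_order_acquire) ;
  }

  void exit () {
    if (_recursions != 0) { _recursions -- ; return ; }
    _owner.store (std::thread::id(), std::memory_order_release) ;
  }
};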
       
void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert (_recursions == 0   , "invariant") ;
     assert (_owner      == Self, "invariant") ;
     // CONSIDER: set or assert OwnerIsThread == 1
     return ;
  }

  if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions ++ ;
     return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert (_owner == Self      , "invariant") ;
     assert (_recursions == 0    , "invariant") ;
     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     Self->_Stalled = 0 ;
     return ;
  }

  assert (_owner != Self          , "invariant") ;
  assert (_succ  != Self          , "invariant") ;
  assert (Self->is_Java_thread()  , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL  , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0     , "invariant") ;
  assert (_owner == Self       , "invariant") ;
  assert (_succ  != Self       , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning, that thread could
  // increment jvmstat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);
  }
  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  }
}
       
// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock (Thread * Self) {
   for (;;) {
      void * own = _owner ;
      if (own != NULL) return 0 ;
      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
         // Either guarantee _recursions == 0 or set _recursions = 0.
         assert (_recursions == 0, "invariant") ;
         assert (_owner == Self, "invariant") ;
         // CONSIDER: set or assert that OwnerIsThread == 1
         return 1 ;
      }
      // The lock had been free momentarily, but we lost the race to the lock.
      // Interference -- the CAS failed.
      // We can either return -1 or retry.
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
   }
}
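
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The TATAS shape used
// by TryLock() above: a cheap shared-mode load first, and a CAS only when the
// lock looks free, which avoids needless read-to-own cache-line upgrades.
// Names are hypothetical; the return convention mirrors TryLock()
// (1 acquired, 0 busy, -1 lost the race).

#include <atomic>

static int example_tatas_trylock (std::atomic<void *> & owner, void * self) {
  if (owner.load (std::memory_order_relaxed) != nullptr) return 0 ;  // test ...
  void * expected = nullptr ;
  if (owner.compare_exchange_strong (expected, self,                 // ... then CAS
                                     std::memory_order_acquire)) {
    return 1 ;     // acquired
  }
  return -1 ;      // momentarily free, but another thread won the CAS
}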
       
void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
        assert (_succ != Self              , "invariant") ;
        assert (_owner == Self             , "invariant") ;
        assert (_Responsible != Self       , "invariant") ;
        return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.

    if (TrySpin (Self) > 0) {
        assert (_owner == Self        , "invariant") ;
        assert (_succ != Self         , "invariant") ;
        assert (_Responsible != Self  , "invariant") ;
        return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ  != Self            , "invariant") ;
    assert (_owner != Self            , "invariant") ;
    assert (_Responsible != Self      , "invariant") ;

    // Enqueue "Self" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for Self.
    // As an aside, if we were to ever rewrite the synchronization code mostly
    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
    // Java objects.  This would avoid awkward lifecycle and liveness issues,
    // as well as eliminate a subset of ABA issues.
    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
    //

    ObjectWaiter node(Self) ;
    Self->_ParkEvent->reset() ;
    node._prev   = (ObjectWaiter *) 0xBAD ;
    node.TState  = ObjectWaiter::TS_CXQ ;

    // Push "Self" onto the front of the _cxq.
    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt ;
    for (;;) {
        node._next = nxt = _cxq ;
        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

        // Interference - the CAS failed because _cxq changed.  Just retry.
        // As an optional optimization we retry the lock.
        if (TryLock (Self) > 0) {
            assert (_succ != Self         , "invariant") ;
            assert (_owner == Self        , "invariant") ;
            assert (_Responsible != Self  , "invariant") ;
            return ;
        }
    }

    // Check for cxq|EntryList edge transition to non-null.  This indicates
    // the onset of contention.  While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
    // operations revert to the faster 1-0 mode.  This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
    // arrange for one of the contending threads to use a timed park() operation
    // to detect and recover from the race.  (Stranding is a form of progress failure
    // where the monitor is unlocked but all the contending threads remain parked).
    // That is, at least one of the contended threads will periodically poll _owner.
    // One of the contending threads will become the designated "Responsible" thread.
    // The Responsible thread uses a timed park instead of a normal indefinite park
    // operation -- it periodically wakes and checks for and recovers from potential
    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
    // be responsible for a monitor.  (A reduced sketch of this timed-park backoff
    // loop appears just after this function.)
    //
    // Currently, one of the contended threads takes on the added role of "Responsible".
    // A viable alternative would be to use a dedicated "stranding checker" thread
    // that periodically iterated over all the threads (or active monitors) and unparked
    // successors where there was risk of stranding.  This would help eliminate the
    // timer scalability issues we see on some platforms as we'd only have one thread
    // -- the checker -- parked on a timer.

    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
        // Try to assume the role of responsible thread for the monitor.
        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // The lock might have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
    // In this case the ST-MEMBAR is accomplished with CAS().
    //
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

    TEVENT (Inflated enter - Contention) ;
    int nWakeups = 0 ;
    int RecheckInterval = 1 ;

    for (;;) {

        if (TryLock (Self) > 0) break ;
        assert (_owner != Self, "invariant") ;

        if ((SyncFlags & 2) && _Responsible == NULL) {
           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
        }

        // park self
        if (_Responsible == Self || (SyncFlags & 1)) {
            TEVENT (Inflated enter - park TIMED) ;
            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
            // Increase the RecheckInterval, but clamp the value.
            RecheckInterval *= 8 ;
            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
        } else {
            TEVENT (Inflated enter - park UNTIMED) ;
            Self->_ParkEvent->park() ;
        }

        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Inflated enter - Futile wakeup) ;
        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
           ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
        // We can defer clearing _succ until after the spin completes.
        // TrySpin() must tolerate being called with _succ == Self.
        // Try yet another round of adaptive spinning.
        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

        // We can find that we were unpark()ed and redesignated _succ while
        // we were spinning.  That's harmless.  If we iterate and call park(),
        // park() will consume the event and return immediately and we'll
        // just spin again.  This pattern can repeat, leaving _succ to simply
        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
        // Alternately, we can sample fired() here, and if set, forgo spinning
        // in the next iteration.

        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
           Self->_ParkEvent->reset() ;
           OrderAccess::fence() ;
        }
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
        OrderAccess::fence() ;
    }

    // Egress :
    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self      , "invariant") ;
    assert (object() != NULL    , "invariant") ;
    // I'd like to write:
    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    // but as we're at a safepoint that's not safe.

    UnlinkAfterAcquire (Self, &node) ;
    if (_succ == Self) _succ = NULL ;

    assert (_succ != Self, "invariant") ;
    if (_Responsible == Self) {
        _Responsible = NULL ;
        // Dekker pivot-point.
        // Consider OrderAccess::storeload() here

        // We may leave threads on cxq|EntryList without a designated
        // "Responsible" thread.  This is benign.  When this thread subsequently
        // exits the monitor it can "see" such preexisting "old" threads --
        // threads that arrived on the cxq|EntryList before the fence, above --
        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
        // non-null and elect a new "Responsible" timer thread.
        //
        // This thread executes:
        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
        //    LD cxq|EntryList               (in subsequent exit)
        //
        // Entering threads in the slow/contended path execute:
        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
        //    The (ST cxq; MEMBAR) is accomplished with CAS().
        //
        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
        // exit operation from floating above the ST Responsible=null.
        //
        // In *practice* however, EnterI() is always followed by some atomic
        // operation such as the decrement of _count in ::enter().  Those atomics
        // obviate the need for the explicit MEMBAR, above.
    }

    // We've acquired ownership with CAS().
    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
    // But since the CAS() this thread may have also stored into _succ,
    // EntryList, cxq or Responsible.  These meta-data updates must be
    // visible __before this thread subsequently drops the lock.
    // Consider what could occur if we didn't enforce this constraint --
    // STs to monitor meta-data and user-data could reorder with (become
    // visible after) the ST in exit that drops ownership of the lock.
    // Some other thread could then acquire the lock, but observe inconsistent
    // or old monitor meta-data and heap data.  That violates the JMM.
    // To that end, the 1-0 exit() operation must have at least STST|LDST
    // "release" barrier semantics.  Specifically, there must be at least a
    // STST|LDST barrier in exit() before the ST of null into _owner that drops
    // the lock.   The barrier ensures that changes to monitor meta-data and data
    // protected by the lock will be visible before we release the lock, and
    // therefore before some other thread (CPU) has a chance to acquire the lock.
    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
    //
    // Critically, any prior STs to _succ or EntryList must be visible before
    // the ST of null into _owner in the *subsequent* (following) corresponding
    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
    // execute a serializing instruction.

    if (SyncFlags & 8) {
       OrderAccess::fence() ;
    }
    return ;
}
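
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The "Responsible"
// thread's anti-stranding loop from EnterI(), reduced to standard C++: a timed
// wait whose interval grows geometrically (*= 8) and clamps at 1000ms, so the
// designated thread periodically repolls the owner field and recovers if a
// 1-0 exit strands it.  park()/unpark() are modeled with a condition_variable;
// all names are hypothetical.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

struct ExampleParker {
  std::mutex m ;
  std::condition_variable cv ;
  bool fired = false ;
  void park_millis (int ms) {
    std::unique_lock<std::mutex> l (m) ;
    cv.wait_for (l, std::chrono::milliseconds (ms), [this] { return fired ; }) ;
    fired = false ;
  }
  void unpark () {
    std::lock_guard<std::mutex> l (m) ;
    fired = true ;
    cv.notify_one () ;
  }
};

static void example_responsible_enter (std::atomic<void *> & owner, void * self,
                                       ExampleParker & parker) {
  int recheck_interval = 1 ;
  for (;;) {
    void * expected = nullptr ;
    if (owner.compare_exchange_strong (expected, self,
                                       std::memory_order_acquire)) {
      return ;                              // lock acquired
    }
    parker.park_millis (recheck_interval) ; // timed park: wake to repoll owner
    recheck_interval *= 8 ;                 // back off geometrically ...
    if (recheck_interval > 1000) recheck_interval = 1000 ;  // ... with a clamp
  }
}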
       
// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
    assert (Self != NULL                , "invariant") ;
    assert (SelfNode != NULL            , "invariant") ;
    assert (SelfNode->_thread == Self   , "invariant") ;
    assert (_waiters > 0                , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;

    int nWakeups = 0 ;
    for (;;) {
        ObjectWaiter::TStates v = SelfNode->TState ;
        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
        assert    (_owner != Self, "invariant") ;

        if (TryLock (Self) > 0) break ;
        if (TrySpin (Self) > 0) break ;

        TEVENT (Wait Reentry - parking) ;

        // State transition wrappers around park() ...
        // ReenterI() wisely defers state transitions until
        // it's clear we must park the thread.
        {
           OSThreadContendState osts(Self->osthread());
           ThreadBlockInVM tbivm(jt);

           // cleared by handle_special_suspend_equivalent_condition()
           // or java_suspend_self()
           jt->set_suspend_equivalent();
           if (SyncFlags & 1) {
              Self->_ParkEvent->park ((jlong)1000) ;
           } else {
              Self->_ParkEvent->park () ;
           }

           // were we externally suspended while we were waiting?
           for (;;) {
              if (!ExitSuspendEquivalent (jt)) break ;
              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
              jt->java_suspend_self();
              jt->set_suspend_equivalent();
           }
        }

        // Try again, but just so we distinguish between futile wakeups and
        // successful wakeups.  The following test isn't algorithmically
        // necessary, but it helps us maintain sensible statistics.
        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Wait Reentry - futile wakeup) ;
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally
        // find that _succ == Self.
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a contending thread
        // *must* retry _owner before parking.  (A reduced sketch of this
        // clear-fence-recheck pattern appears just after this function.)
        OrderAccess::fence() ;

        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
          ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
    }

    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // Unlinking from the EntryList is constant-time and atomic-free.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self, "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    UnlinkAfterAcquire (Self, SelfNode) ;
    if (_succ == Self) _succ = NULL ;
    assert (_succ != Self, "invariant") ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
    OrderAccess::fence() ;      // see comments at the end of EnterI()
}
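
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The succ-clearing
// invariant enforced in EnterI()/ReenterI() ("after clearing _succ a thread
// *must* retry _owner before parking"), in standard atomics: the full fence
// orders the _succ store before the _owner reload, closing the window in
// which an exiting thread sees a successor while that successor gives up and
// parks.  All names are hypothetical.

#include <atomic>

static bool example_prepare_to_park (std::atomic<void *> & owner,
                                     std::atomic<void *> & succ,
                                     void * self) {
  if (succ.load (std::memory_order_relaxed) == self) {
    succ.store (nullptr, std::memory_order_relaxed) ;   // renounce heir status
  }
  std::atomic_thread_fence (std::memory_order_seq_cst) ;
  // Re-examine the owner field only after the fence; park only if still held.
  return owner.load (std::memory_order_relaxed) != nullptr ;   // true => park
}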
       
// By convention we unlink a contending thread from the EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.
       
void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
    assert (_owner == Self, "invariant") ;
    assert (SelfNode->_thread == Self, "invariant") ;

    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
        // Normal case: remove Self from the DLL EntryList.
        // This is a constant-time operation.
        ObjectWaiter * nxt = SelfNode->_next ;
        ObjectWaiter * prv = SelfNode->_prev ;
        if (nxt != NULL) nxt->_prev = prv ;
        if (prv != NULL) prv->_next = nxt ;
        if (SelfNode == _EntryList ) _EntryList = nxt ;
        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        TEVENT (Unlink from EntryList) ;
    } else {
        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
        // Inopportune interleaving -- Self is still on the cxq.
        // This usually means the enqueue of self raced an exiting thread.
        // Normally we'll find Self near the front of the cxq, so
        // dequeueing is typically fast.  If need be we can accelerate
        // this with some MCS/CHL-like bidirectional list hints and advisory
        // back-links so dequeueing from the interior will normally operate
        // in constant-time.
        // Dequeue Self from either the head (with CAS) or from the interior
        // with a linear-time scan and normal non-atomic memory operations.
        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
        // and then unlink Self from EntryList.  We have to drain eventually,
        // so it might as well be now.

        ObjectWaiter * v = _cxq ;
        assert (v != NULL, "invariant") ;
        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
            // The CAS above can fail from interference IFF a "RAT" arrived.
            // In that case Self must be in the interior and can no longer be
            // at the head of cxq.
            if (v == SelfNode) {
                assert (_cxq != v, "invariant") ;
                v = _cxq ;          // CAS above failed - start scan at head of list
            }
            ObjectWaiter * p ;
            ObjectWaiter * q = NULL ;
            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
                q = p ;
                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
            }
            assert (v != SelfNode,  "invariant") ;
            assert (p == SelfNode,  "Node not found on cxq") ;
            assert (p != _cxq,      "invariant") ;
            assert (q != NULL,      "invariant") ;
            assert (q->_next == p,  "invariant") ;
            q->_next = p->_next ;
        }
        TEVENT (Unlink from cxq) ;
    }

    // Diagnostic hygiene ...
    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
}
       
// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.   Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load on the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (More precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.  (A reduced sketch contrasting the 1-1 and 1-0
// exit protocols follows this comment block.)
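
// -----------------------------------------------------------------------------
// Illustrative sketch -- not part of the original file.  The 1-1 and 1-0 exit
// protocols contrasted above, in standard atomics.  The 1-1 exit pairs the
// releasing store with a storeload-class fence and then reinspects the queue,
// so it cannot strand a waiter; the 1-0 exit is the bare store -- cheaper, but
// it admits the (rare) stranding repaired by the timed parks.  All names are
// hypothetical.

#include <atomic>

static void example_exit_1_1 (std::atomic<void *> & owner,
                              std::atomic<void *> & queue_head,
                              void (* wake_successor) ()) {
  owner.store (nullptr, std::memory_order_release) ;      // drop the lock
  std::atomic_thread_fence (std::memory_order_seq_cst) ;  // ST; MEMBAR; LD
  if (queue_head.load (std::memory_order_relaxed) != nullptr) {
    wake_successor () ;       // someone is queued -- ensure succession
  }
}

static void example_exit_1_0 (std::atomic<void *> & owner) {
  // No fence, no CAS: the fast path, but it can race a slow-path enter and
  // strand the entering thread (see the _Responsible recovery machinery).
  owner.store (nullptr, std::memory_order_release) ;
}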
       
void ATTR ObjectMonitor::exit(TRAPS) {
   Thread * Self = THREAD ;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
       // Non-null to Non-null is safe as long as all readers can
       // tolerate either flavor.
       assert (_recursions == 0, "invariant") ;
       _owner = THREAD ;
       _recursions = 0 ;
       OwnerIsThread = 1 ;
     } else {
       // NOTE: we need to handle unbalanced monitor enter/exit
       // in native code by throwing an exception.
       // TODO: Throw an IllegalMonitorStateException ?
       TEVENT (Exit - Throw IMSX) ;
       assert(false, "Non-balanced monitor enter/exit!");
       if (false) {
          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
       }
       return;
     }
   }

   if (_recursions != 0) {
     _recursions--;        // this is simple recursive enter
     TEVENT (Inflated exit - recursive) ;
     return ;
   }

   // Invariant: after setting Responsible=null a thread must execute
   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }

   for (;;) {
      assert (THREAD == _owner, "invariant") ;

      if (Knob_ExitPolicy == 0) {
         // release semantics: prior loads and stores from within the critical section
         // must not float (reorder) past the following store that drops the lock.
         // On SPARC that requires MEMBAR #loadstore|#storestore.
         // But of course in TSO #loadstore|#storestore is not required.
         // I'd like to write one of the following:
         // A.  OrderAccess::release() ; _owner = NULL
         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
         // store into a _dummy variable.  That store is not needed, but can result
         // in massive wasteful coherency traffic on classic SMP systems.
         // Instead, I use release_store(), which is implemented as just a simple
         // ST on x64, x86 and SPARC.
         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
         OrderAccess::storeload() ;                         // See if we need to wake a successor
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            TEVENT (Inflated exit - simple egress) ;
            return ;
         }
         TEVENT (Inflated exit - complex egress) ;

         // Normally the exiting thread is responsible for ensuring succession,
         // but if other successors are ready or other entering threads are spinning
         // then this thread can simply store NULL into _owner and exit without
         // waking a successor.  The existence of spinners or ready successors
         // guarantees proper succession (liveness).  Responsibility passes to the
         // ready or running successors.  The exiting thread delegates the duty.
         // More precisely, if a successor already exists this thread is absolved
         // of the responsibility of waking (unparking) one.
         //
         // The _succ variable is critical to reducing futile wakeup frequency.
         // _succ identifies the "heir presumptive" thread that has been made
         // ready (unparked) but that has not yet run.  We need only one such
         // successor thread to guarantee progress.
         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
         // section 3.3 "Futile Wakeup Throttling" for details.
         //
         // Note that spinners in Enter() also set _succ non-null.
         // In the current implementation spinners opportunistically set
         // _succ so that exiting threads might avoid waking a successor.
         // Another less appealing alternative would be for the exiting thread
         // to drop the lock and then spin briefly to see if a spinner managed
         // to acquire the lock.  If so, the exiting thread could exit
         // immediately without waking a successor, otherwise the exiting
         // thread would need to dequeue and wake a successor.
         // (Note that we'd need to make the post-drop spin short, but no
         // shorter than the worst-case round-trip cache-line migration time.
         // The dropped lock needs to become visible to the spinner, and then
         // the acquisition of the lock by the spinner must become visible to
         // the exiting thread).
         //

         // It appears that an heir-presumptive (successor) must be made ready.
         // Only the current lock owner can manipulate the EntryList or
         // drain _cxq, so we need to reacquire the lock.  If we fail
         // to reacquire the lock the responsibility for ensuring succession
         // falls to the new owner.
         //
         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return ;
         }
         TEVENT (Exit - Reacquired) ;
      } else {
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
            OrderAccess::storeload() ;
            // Ratify the previously observed values.
            if (_cxq == NULL || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }

            // inopportune interleaving -- the exiting thread (this thread)
            // in the fast-exit path raced an entering thread in the slow-enter
            // path.
            // We have two choices:
            // A.  Try to reacquire the lock.
            //     If the CAS() fails return immediately, otherwise
            //     we either restart/rerun the exit operation, or simply
            //     fall-through into the code below which wakes a successor.
            // B.  If the elements forming the EntryList|cxq are TSM
       
   991             //     we could simply unpark() the lead thread and return
       
   992             //     without having set _succ.
       
   993             if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
       
    994               TEVENT (Inflated exit - reacquired failed) ;
       
   995                return ;
       
   996             }
       
    997             TEVENT (Inflated exit - reacquired succeeded) ;
       
   998          } else {
       
   999             TEVENT (Inflated exit - complex egress) ;
       
  1000          }
       
  1001       }
       
  1002 
       
  1003       guarantee (_owner == THREAD, "invariant") ;
       
  1004 
       
  1005       ObjectWaiter * w = NULL ;
       
  1006       int QMode = Knob_QMode ;
       
  1007 
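          // Summary of the Knob_QMode succession policies, derived from the
          // cases handled below:
          //   QMode == 0 : drain cxq into EntryList, preserving cxq order
          //   QMode == 1 : drain cxq into EntryList, reversing the order
          //   QMode == 2 : cxq has precedence -- wake the head of cxq directly
          //   QMode == 3 : drain cxq and append it to the tail of EntryList
          //   QMode == 4 : drain cxq and prepend it to the head of EntryList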
       
  1008       if (QMode == 2 && _cxq != NULL) {
       
  1009           // QMode == 2 : cxq has precedence over EntryList.
       
  1010           // Try to directly wake a successor from the cxq.
       
  1011           // If successful, the successor will need to unlink itself from cxq.
       
  1012           w = _cxq ;
       
  1013           assert (w != NULL, "invariant") ;
       
  1014           assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
       
  1015           ExitEpilog (Self, w) ;
       
  1016           return ;
       
  1017       }
       
  1018 
       
  1019       if (QMode == 3 && _cxq != NULL) {
       
  1020           // Aggressively drain cxq into EntryList at the first opportunity.
       
   1021           // This policy ensures that recently-run threads live at the head of EntryList.
       
  1022           // Drain _cxq into EntryList - bulk transfer.
       
  1023           // First, detach _cxq.
       
  1024           // The following loop is tantamount to: w = swap (&cxq, NULL)
       
  1025           w = _cxq ;
       
  1026           for (;;) {
       
  1027              assert (w != NULL, "Invariant") ;
       
  1028              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
       
  1029              if (u == w) break ;
       
  1030              w = u ;
       
  1031           }
       
  1032           assert (w != NULL              , "invariant") ;
       
  1033 
       
  1034           ObjectWaiter * q = NULL ;
       
  1035           ObjectWaiter * p ;
       
  1036           for (p = w ; p != NULL ; p = p->_next) {
       
  1037               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
       
  1038               p->TState = ObjectWaiter::TS_ENTER ;
       
  1039               p->_prev = q ;
       
  1040               q = p ;
       
  1041           }
       
  1042 
       
  1043           // Append the RATs to the EntryList
       
  1044           // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
       
  1045           ObjectWaiter * Tail ;
       
  1046           for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
       
  1047           if (Tail == NULL) {
       
  1048               _EntryList = w ;
       
  1049           } else {
       
  1050               Tail->_next = w ;
       
  1051               w->_prev = Tail ;
       
  1052           }
       
  1053 
       
  1054           // Fall thru into code that tries to wake a successor from EntryList
       
  1055       }
       
  1056 
       
  1057       if (QMode == 4 && _cxq != NULL) {
       
  1058           // Aggressively drain cxq into EntryList at the first opportunity.
       
   1059           // This policy ensures that recently-run threads live at the head of EntryList.
       
  1060 
       
  1061           // Drain _cxq into EntryList - bulk transfer.
       
  1062           // First, detach _cxq.
       
  1063           // The following loop is tantamount to: w = swap (&cxq, NULL)
       
  1064           w = _cxq ;
       
  1065           for (;;) {
       
  1066              assert (w != NULL, "Invariant") ;
       
  1067              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
       
  1068              if (u == w) break ;
       
  1069              w = u ;
       
  1070           }
       
  1071           assert (w != NULL              , "invariant") ;
       
  1072 
       
  1073           ObjectWaiter * q = NULL ;
       
  1074           ObjectWaiter * p ;
       
  1075           for (p = w ; p != NULL ; p = p->_next) {
       
  1076               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
       
  1077               p->TState = ObjectWaiter::TS_ENTER ;
       
  1078               p->_prev = q ;
       
  1079               q = p ;
       
  1080           }
       
  1081 
       
  1082           // Prepend the RATs to the EntryList
       
  1083           if (_EntryList != NULL) {
       
  1084               q->_next = _EntryList ;
       
  1085               _EntryList->_prev = q ;
       
  1086           }
       
  1087           _EntryList = w ;
       
  1088 
       
  1089           // Fall thru into code that tries to wake a successor from EntryList
       
  1090       }
       
  1091 
       
  1092       w = _EntryList  ;
       
  1093       if (w != NULL) {
       
   1094           // I'd like to write: guarantee (w->_thread != Self).

   1095           // But in practice an exiting thread may find itself on the EntryList.

   1096           // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and

   1097           // then calls exit().  Exit() releases the lock by setting O._owner to NULL.

   1098           // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The

   1099           // notify() operation moves T1 from O's waitset to O's EntryList. T2 then

   1100           // releases the lock "O".  T1 resumes immediately after its ST of null into

   1101           // _owner, above.  T1 notices that the EntryList is populated, so it

   1102           // reacquires the lock and then finds itself on the EntryList.

   1103           // Given all that, we have to tolerate the circumstance where "w" is

   1104           // associated with Self.
       
  1105           assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  1106           ExitEpilog (Self, w) ;
       
  1107           return ;
       
  1108       }
       
  1109 
       
  1110       // If we find that both _cxq and EntryList are null then just
       
  1111       // re-run the exit protocol from the top.
       
  1112       w = _cxq ;
       
  1113       if (w == NULL) continue ;
       
  1114 
       
  1115       // Drain _cxq into EntryList - bulk transfer.
       
  1116       // First, detach _cxq.
       
  1117       // The following loop is tantamount to: w = swap (&cxq, NULL)
       
  1118       for (;;) {
       
  1119           assert (w != NULL, "Invariant") ;
       
  1120           ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
       
  1121           if (u == w) break ;
       
  1122           w = u ;
       
  1123       }
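       // (A minimal equivalent, assuming the platform layer's Atomic::xchg_ptr()
       // may be used here, is the single atomic exchange:
       //    w = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;
       // The CAS loop above expresses the same swap portably.)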
       
  1124       TEVENT (Inflated exit - drain cxq into EntryList) ;
       
  1125 
       
  1126       assert (w != NULL              , "invariant") ;
       
  1127       assert (_EntryList  == NULL    , "invariant") ;
       
  1128 
       
  1129       // Convert the LIFO SLL anchored by _cxq into a DLL.
       
  1130       // The list reorganization step operates in O(LENGTH(w)) time.
       
  1131       // It's critical that this step operate quickly as
       
  1132       // "Self" still holds the outer-lock, restricting parallelism
       
  1133       // and effectively lengthening the critical section.
       
  1134       // Invariant: s chases t chases u.
       
  1135       // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
       
  1136       // we have faster access to the tail.
       
  1137 
       
  1138       if (QMode == 1) {
       
  1139          // QMode == 1 : drain cxq to EntryList, reversing order
       
  1140          // We also reverse the order of the list.
       
  1141          ObjectWaiter * s = NULL ;
       
  1142          ObjectWaiter * t = w ;
       
  1143          ObjectWaiter * u = NULL ;
       
  1144          while (t != NULL) {
       
  1145              guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
       
  1146              t->TState = ObjectWaiter::TS_ENTER ;
       
  1147              u = t->_next ;
       
  1148              t->_prev = u ;
       
  1149              t->_next = s ;
       
  1150              s = t;
       
  1151              t = u ;
       
  1152          }
       
  1153          _EntryList  = s ;
       
  1154          assert (s != NULL, "invariant") ;
       
  1155       } else {
       
  1156          // QMode == 0 or QMode == 2
       
  1157          _EntryList = w ;
       
  1158          ObjectWaiter * q = NULL ;
       
  1159          ObjectWaiter * p ;
       
  1160          for (p = w ; p != NULL ; p = p->_next) {
       
  1161              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
       
  1162              p->TState = ObjectWaiter::TS_ENTER ;
       
  1163              p->_prev = q ;
       
  1164              q = p ;
       
  1165          }
       
  1166       }
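      // Worked example with hypothetical threads: given _cxq == T3 -> T2 -> T1
      // (T3 pushed most recently), QMode == 1 produces the EntryList
      // T1 <-> T2 <-> T3, while QMode == 0 or 2 produces T3 <-> T2 <-> T1.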
       
  1167 
       
  1168       // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
       
  1169       // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
       
  1170 
       
  1171       // See if we can abdicate to a spinner instead of waking a thread.
       
  1172       // A primary goal of the implementation is to reduce the
       
  1173       // context-switch rate.
       
  1174       if (_succ != NULL) continue;
       
  1175 
       
  1176       w = _EntryList  ;
       
  1177       if (w != NULL) {
       
  1178           guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  1179           ExitEpilog (Self, w) ;
       
  1180           return ;
       
  1181       }
       
  1182    }
       
  1183 }
       
  1184 
       
  1185 // ExitSuspendEquivalent:
       
   1186 // A faster alternative to handle_special_suspend_equivalent_condition()
       
  1187 //
       
  1188 // handle_special_suspend_equivalent_condition() unconditionally
       
  1189 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
       
  1190 // operations have high latency.  Note that in ::enter() we call HSSEC
       
  1191 // while holding the monitor, so we effectively lengthen the critical sections.
       
  1192 //
       
  1193 // There are a number of possible solutions:
       
  1194 //
       
  1195 // A.  To ameliorate the problem we might also defer state transitions
       
  1196 //     to as late as possible -- just prior to parking.
       
  1197 //     Given that, we'd call HSSEC after having returned from park(),
       
  1198 //     but before attempting to acquire the monitor.  This is only a
       
  1199 //     partial solution.  It avoids calling HSSEC while holding the
       
  1200 //     monitor (good), but it still increases successor reacquisition latency --
       
  1201 //     the interval between unparking a successor and the time the successor
       
  1202 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
       
   1203 //     If we use this technique we can also avoid the EnterI()-exit() loop
       
  1204 //     in ::enter() where we iteratively drop the lock and then attempt
       
  1205 //     to reacquire it after suspending.
       
  1206 //
       
  1207 // B.  In the future we might fold all the suspend bits into a
       
  1208 //     composite per-thread suspend flag and then update it with CAS().
       
  1209 //     Alternately, a Dekker-like mechanism with multiple variables
       
  1210 //     would suffice:
       
  1211 //       ST Self->_suspend_equivalent = false
       
  1212 //       MEMBAR
       
   1213 //       LD Self->_suspend_flags
       
  1214 //
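//     A sketch of that Dekker-like handshake, assuming hypothetical direct
//     access to the JavaThread fields:
//       Self->_suspend_equivalent = false ;      // ST
//       OrderAccess::fence() ;                   // MEMBAR (storeload)
//       if (Self->_suspend_flags != 0) { ... }   // LD -- fall into slow path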
       
  1215 
       
  1216 
       
  1217 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
       
  1218    int Mode = Knob_FastHSSEC ;
       
  1219    if (Mode && !jSelf->is_external_suspend()) {
       
  1220       assert (jSelf->is_suspend_equivalent(), "invariant") ;
       
  1221       jSelf->clear_suspend_equivalent() ;
       
  1222       if (2 == Mode) OrderAccess::storeload() ;
       
  1223       if (!jSelf->is_external_suspend()) return false ;
       
  1224       // We raced a suspension -- fall thru into the slow path
       
  1225       TEVENT (ExitSuspendEquivalent - raced) ;
       
  1226       jSelf->set_suspend_equivalent() ;
       
  1227    }
       
  1228    return jSelf->handle_special_suspend_equivalent_condition() ;
       
  1229 }
       
  1230 
       
  1231 
       
  1232 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
       
  1233    assert (_owner == Self, "invariant") ;
       
  1234 
       
   1235    // Exit protocol:

   1236    // 1. ST _succ = wakee

   1237    // 2. membar #loadstore|#storestore;

   1238    // 3. ST _owner = NULL

   1239    // 4. unpark(wakee)
       
  1240 
       
  1241    _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
       
  1242    ParkEvent * Trigger = Wakee->_event ;
       
  1243 
       
  1244    // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
       
  1245    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
       
  1246    // out-of-scope (non-extant).
       
  1247    Wakee  = NULL ;
       
  1248 
       
  1249    // Drop the lock
       
  1250    OrderAccess::release_store_ptr (&_owner, NULL) ;
       
  1251    OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
       
  1252 
       
  1253    if (SafepointSynchronize::do_call_back()) {
       
  1254       TEVENT (unpark before SAFEPOINT) ;
       
  1255    }
       
  1256 
       
  1257    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
       
  1258    Trigger->unpark() ;
       
  1259 
       
  1260    // Maintain stats and report events to JVMTI
       
  1261    if (ObjectMonitor::_sync_Parks != NULL) {
       
  1262       ObjectMonitor::_sync_Parks->inc() ;
       
  1263    }
       
  1264 }
       
  1265 
       
  1266 
       
  1267 // -----------------------------------------------------------------------------
       
  1268 // Class Loader deadlock handling.
       
  1269 //
       
  1270 // complete_exit exits a lock returning recursion count
       
  1271 // complete_exit/reenter operate as a wait without waiting
       
  1272 // complete_exit requires an inflated monitor
       
  1273 // The _owner field is not always the Thread addr even with an
       
  1274 // inflated monitor, e.g. the monitor can be inflated by a non-owning
       
  1275 // thread due to contention.
       
  1276 intptr_t ObjectMonitor::complete_exit(TRAPS) {
       
  1277    Thread * const Self = THREAD;
       
  1278    assert(Self->is_Java_thread(), "Must be Java thread!");
       
  1279    JavaThread *jt = (JavaThread *)THREAD;
       
  1280 
       
  1281    DeferredInitialize();
       
  1282 
       
  1283    if (THREAD != _owner) {
       
  1284     if (THREAD->is_lock_owned ((address)_owner)) {
       
  1285        assert(_recursions == 0, "internal state error");
       
  1286        _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
       
  1287        _recursions = 0 ;
       
  1288        OwnerIsThread = 1 ;
       
  1289     }
       
  1290    }
       
  1291 
       
  1292    guarantee(Self == _owner, "complete_exit not owner");
       
  1293    intptr_t save = _recursions; // record the old recursion count
       
  1294    _recursions = 0;        // set the recursion level to be 0
       
  1295    exit (Self) ;           // exit the monitor
       
  1296    guarantee (_owner != Self, "invariant");
       
  1297    return save;
       
  1298 }
       
  1299 
       
  1300 // reenter() enters a lock and sets recursion count
       
  1301 // complete_exit/reenter operate as a wait without waiting
       
  1302 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
       
  1303    Thread * const Self = THREAD;
       
  1304    assert(Self->is_Java_thread(), "Must be Java thread!");
       
  1305    JavaThread *jt = (JavaThread *)THREAD;
       
  1306 
       
  1307    guarantee(_owner != Self, "reenter already owner");
       
  1308    enter (THREAD);       // enter the monitor
       
  1309    guarantee (_recursions == 0, "reenter recursion");
       
  1310    _recursions = recursions;
       
  1311    return;
       
  1312 }
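// Usage sketch (illustrative only -- the hypothetical caller below stands in
// for the real call sites in ObjectSynchronizer):
//    intptr_t save = monitor->complete_exit (THREAD) ;   // fully release
//    ... do work that must not hold the monitor, e.g. class loading ...
//    monitor->reenter (save, THREAD) ;                   // restore _recursions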
       
  1313 
       
  1314 
       
  1315 // -----------------------------------------------------------------------------
       
  1316 // A macro is used below because there may already be a pending
       
  1317 // exception which should not abort the execution of the routines
       
  1318 // which use this (which is why we don't put this into check_slow and
       
  1319 // call it with a CHECK argument).
       
  1320 
       
  1321 #define CHECK_OWNER()                                                             \
       
  1322   do {                                                                            \
       
  1323     if (THREAD != _owner) {                                                       \
       
  1324       if (THREAD->is_lock_owned((address) _owner)) {                              \
       
  1325         _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
       
  1326         _recursions = 0;                                                          \
       
  1327         OwnerIsThread = 1 ;                                                       \
       
  1328       } else {                                                                    \
       
  1329         TEVENT (Throw IMSX) ;                                                     \
       
  1330         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
       
  1331       }                                                                           \
       
  1332     }                                                                             \
       
  1333   } while (false)
       
  1334 
       
   1335 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
       
  1336 // TODO-FIXME: remove check_slow() -- it's likely dead.
       
  1337 
       
  1338 void ObjectMonitor::check_slow(TRAPS) {
       
  1339   TEVENT (check_slow - throw IMSX) ;
       
  1340   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
       
  1341   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
       
  1342 }
       
  1343 
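// Adjust is a portable fetch-and-add built from cmpxchg: it atomically adds
// dx to *adr and returns the value observed just before the update.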
       
  1344 static int Adjust (volatile int * adr, int dx) {
       
  1345   int v ;
       
  1346   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
       
  1347   return v ;
       
  1348 }
       
  1349 // -----------------------------------------------------------------------------
       
  1350 // Wait/Notify/NotifyAll
       
  1351 //
       
  1352 // Note: a subset of changes to ObjectMonitor::wait()
       
  1353 // will need to be replicated in complete_exit above
       
  1354 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
       
  1355    Thread * const Self = THREAD ;
       
  1356    assert(Self->is_Java_thread(), "Must be Java thread!");
       
  1357    JavaThread *jt = (JavaThread *)THREAD;
       
  1358 
       
  1359    DeferredInitialize () ;
       
  1360 
       
  1361    // Throw IMSX or IEX.
       
  1362    CHECK_OWNER();
       
  1363 
       
  1364    // check for a pending interrupt
       
  1365    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
       
  1366      // post monitor waited event.  Note that this is past-tense, we are done waiting.
       
  1367      if (JvmtiExport::should_post_monitor_waited()) {
       
  1368         // Note: 'false' parameter is passed here because the
       
  1369         // wait was not timed out due to thread interrupt.
       
  1370         JvmtiExport::post_monitor_waited(jt, this, false);
       
  1371      }
       
  1372      TEVENT (Wait - Throw IEX) ;
       
  1373      THROW(vmSymbols::java_lang_InterruptedException());
       
  1374      return ;
       
  1375    }
       
  1376    TEVENT (Wait) ;
       
  1377 
       
  1378    assert (Self->_Stalled == 0, "invariant") ;
       
  1379    Self->_Stalled = intptr_t(this) ;
       
  1380    jt->set_current_waiting_monitor(this);
       
  1381 
       
  1382    // create a node to be put into the queue
       
  1383    // Critically, after we reset() the event but prior to park(), we must check
       
  1384    // for a pending interrupt.
       
  1385    ObjectWaiter node(Self);
       
  1386    node.TState = ObjectWaiter::TS_WAIT ;
       
  1387    Self->_ParkEvent->reset() ;
       
  1388    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
       
  1389 
       
  1390    // Enter the waiting queue, which is a circular doubly linked list in this case
       
  1391    // but it could be a priority queue or any data structure.
       
  1392    // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
       
   1393    // by the owner of the monitor *except* in the case where park()

   1394    // returns because of a timeout or interrupt.  Contention is exceptionally rare
       
  1395    // so we use a simple spin-lock instead of a heavier-weight blocking lock.
       
  1396 
       
  1397    Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
       
  1398    AddWaiter (&node) ;
       
  1399    Thread::SpinRelease (&_WaitSetLock) ;
       
  1400 
       
  1401    if ((SyncFlags & 4) == 0) {
       
  1402       _Responsible = NULL ;
       
  1403    }
       
  1404    intptr_t save = _recursions; // record the old recursion count
       
  1405    _waiters++;                  // increment the number of waiters
       
   1406    _recursions = 0;             // set the recursion level to 0
       
  1407    exit (Self) ;                    // exit the monitor
       
  1408    guarantee (_owner != Self, "invariant") ;
       
  1409 
       
  1410    // As soon as the ObjectMonitor's ownership is dropped in the exit()
       
  1411    // call above, another thread can enter() the ObjectMonitor, do the
       
  1412    // notify(), and exit() the ObjectMonitor. If the other thread's
       
  1413    // exit() call chooses this thread as the successor and the unpark()
       
  1414    // call happens to occur while this thread is posting a
       
  1415    // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
       
  1416    // handler using RawMonitors and consuming the unpark().
       
  1417    //
       
  1418    // To avoid the problem, we re-post the event. This does no harm
       
  1419    // even if the original unpark() was not consumed because we are the
       
  1420    // chosen successor for this monitor.
       
  1421    if (node._notified != 0 && _succ == Self) {
       
  1422       node._event->unpark();
       
  1423    }
       
  1424 
       
  1425    // The thread is on the WaitSet list - now park() it.
       
  1426    // On MP systems it's conceivable that a brief spin before we park
       
  1427    // could be profitable.
       
  1428    //
       
  1429    // TODO-FIXME: change the following logic to a loop of the form
       
  1430    //   while (!timeout && !interrupted && _notified == 0) park()
       
  1431 
       
  1432    int ret = OS_OK ;
       
  1433    int WasNotified = 0 ;
       
  1434    { // State transition wrappers
       
  1435      OSThread* osthread = Self->osthread();
       
  1436      OSThreadWaitState osts(osthread, true);
       
  1437      {
       
  1438        ThreadBlockInVM tbivm(jt);
       
  1439        // Thread is in thread_blocked state and oop access is unsafe.
       
  1440        jt->set_suspend_equivalent();
       
  1441 
       
  1442        if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
       
  1443            // Intentionally empty
       
  1444        } else
       
  1445        if (node._notified == 0) {
       
  1446          if (millis <= 0) {
       
  1447             Self->_ParkEvent->park () ;
       
  1448          } else {
       
  1449             ret = Self->_ParkEvent->park (millis) ;
       
  1450          }
       
  1451        }
       
  1452 
       
  1453        // were we externally suspended while we were waiting?
       
  1454        if (ExitSuspendEquivalent (jt)) {
       
  1455           // TODO-FIXME: add -- if succ == Self then succ = null.
       
  1456           jt->java_suspend_self();
       
  1457        }
       
  1458 
       
  1459      } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
       
  1460 
       
  1461 
       
  1462      // Node may be on the WaitSet, the EntryList (or cxq), or in transition
       
  1463      // from the WaitSet to the EntryList.
       
  1464      // See if we need to remove Node from the WaitSet.
       
  1465      // We use double-checked locking to avoid grabbing _WaitSetLock
       
  1466      // if the thread is not on the wait queue.
       
  1467      //
       
  1468      // Note that we don't need a fence before the fetch of TState.
       
   1469      // In the worst case we'll fetch an old, stale value of TS_WAIT previously

   1470      // written by this thread. (Perhaps the fetch might even be satisfied
       
  1471      // by a look-aside into the processor's own store buffer, although given
       
  1472      // the length of the code path between the prior ST and this load that's
       
  1473      // highly unlikely).  If the following LD fetches a stale TS_WAIT value
       
  1474      // then we'll acquire the lock and then re-fetch a fresh TState value.
       
  1475      // That is, we fail toward safety.
       
  1476 
       
  1477      if (node.TState == ObjectWaiter::TS_WAIT) {
       
  1478          Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
       
  1479          if (node.TState == ObjectWaiter::TS_WAIT) {
       
  1480             DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
       
  1481             assert(node._notified == 0, "invariant");
       
  1482             node.TState = ObjectWaiter::TS_RUN ;
       
  1483          }
       
  1484          Thread::SpinRelease (&_WaitSetLock) ;
       
  1485      }
       
  1486 
       
   1487      // The thread is now either off-list (TS_RUN),
       
  1488      // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
       
  1489      // The Node's TState variable is stable from the perspective of this thread.
       
  1490      // No other threads will asynchronously modify TState.
       
  1491      guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
       
  1492      OrderAccess::loadload() ;
       
  1493      if (_succ == Self) _succ = NULL ;
       
  1494      WasNotified = node._notified ;
       
  1495 
       
  1496      // Reentry phase -- reacquire the monitor.
       
  1497      // re-enter contended monitor after object.wait().
       
  1498      // retain OBJECT_WAIT state until re-enter successfully completes
       
  1499      // Thread state is thread_in_vm and oop access is again safe,
       
  1500      // although the raw address of the object may have changed.
       
  1501      // (Don't cache naked oops over safepoints, of course).
       
  1502 
       
  1503      // post monitor waited event. Note that this is past-tense, we are done waiting.
       
  1504      if (JvmtiExport::should_post_monitor_waited()) {
       
  1505        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
       
  1506      }
       
  1507      OrderAccess::fence() ;
       
  1508 
       
  1509      assert (Self->_Stalled != 0, "invariant") ;
       
  1510      Self->_Stalled = 0 ;
       
  1511 
       
  1512      assert (_owner != Self, "invariant") ;
       
  1513      ObjectWaiter::TStates v = node.TState ;
       
  1514      if (v == ObjectWaiter::TS_RUN) {
       
  1515          enter (Self) ;
       
  1516      } else {
       
  1517          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
       
  1518          ReenterI (Self, &node) ;
       
  1519          node.wait_reenter_end(this);
       
  1520      }
       
  1521 
       
  1522      // Self has reacquired the lock.
       
  1523      // Lifecycle - the node representing Self must not appear on any queues.
       
  1524      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
       
  1525      // want residual elements associated with this thread left on any lists.
       
  1526      guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
       
  1527      assert    (_owner == Self, "invariant") ;
       
  1528      assert    (_succ != Self , "invariant") ;
       
  1529    } // OSThreadWaitState()
       
  1530 
       
  1531    jt->set_current_waiting_monitor(NULL);
       
  1532 
       
  1533    guarantee (_recursions == 0, "invariant") ;
       
  1534    _recursions = save;     // restore the old recursion count
       
  1535    _waiters--;             // decrement the number of waiters
       
  1536 
       
  1537    // Verify a few postconditions
       
  1538    assert (_owner == Self       , "invariant") ;
       
  1539    assert (_succ  != Self       , "invariant") ;
       
  1540    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
       
  1541 
       
  1542    if (SyncFlags & 32) {
       
  1543       OrderAccess::fence() ;
       
  1544    }
       
  1545 
       
  1546    // check if the notification happened
       
  1547    if (!WasNotified) {
       
  1548      // no, it could be timeout or Thread.interrupt() or both
       
  1549      // check for interrupt event, otherwise it is timeout
       
  1550      if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
       
  1551        TEVENT (Wait - throw IEX from epilog) ;
       
  1552        THROW(vmSymbols::java_lang_InterruptedException());
       
  1553      }
       
  1554    }
       
  1555 
       
   1556    // NOTE: A spurious wakeup will be treated as a timeout.
       
  1557    // Monitor notify has precedence over thread interrupt.
       
  1558 }
       
  1559 
       
  1560 
       
  1561 // Consider:
       
  1562 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
       
  1563 // then instead of transferring a thread from the WaitSet to the EntryList
       
  1564 // we might just dequeue a thread from the WaitSet and directly unpark() it.
       
  1565 
       
  1566 void ObjectMonitor::notify(TRAPS) {
       
  1567   CHECK_OWNER();
       
  1568   if (_WaitSet == NULL) {
       
  1569      TEVENT (Empty-Notify) ;
       
  1570      return ;
       
  1571   }
       
  1572   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
       
  1573 
       
  1574   int Policy = Knob_MoveNotifyee ;
       
  1575 
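  // Summary of the Knob_MoveNotifyee policies, derived from the cases
  // handled below:
  //   Policy == 0 : prepend the notifyee to the EntryList
  //   Policy == 1 : append the notifyee to the EntryList
  //   Policy == 2 : prepend the notifyee to the cxq
  //   Policy == 3 : append the notifyee to the cxq
  //   Policy >= 4 : leave the notifyee on no queue; unpark() it immediately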
       
  1576   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
       
  1577   ObjectWaiter * iterator = DequeueWaiter() ;
       
  1578   if (iterator != NULL) {
       
  1579      TEVENT (Notify1 - Transfer) ;
       
  1580      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
       
  1581      guarantee (iterator->_notified == 0, "invariant") ;
       
  1582      if (Policy != 4) {
       
  1583         iterator->TState = ObjectWaiter::TS_ENTER ;
       
  1584      }
       
  1585      iterator->_notified = 1 ;
       
  1586 
       
  1587      ObjectWaiter * List = _EntryList ;
       
  1588      if (List != NULL) {
       
  1589         assert (List->_prev == NULL, "invariant") ;
       
  1590         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  1591         assert (List != iterator, "invariant") ;
       
  1592      }
       
  1593 
       
  1594      if (Policy == 0) {       // prepend to EntryList
       
  1595          if (List == NULL) {
       
  1596              iterator->_next = iterator->_prev = NULL ;
       
  1597              _EntryList = iterator ;
       
  1598          } else {
       
  1599              List->_prev = iterator ;
       
  1600              iterator->_next = List ;
       
  1601              iterator->_prev = NULL ;
       
  1602              _EntryList = iterator ;
       
  1603         }
       
  1604      } else
       
  1605      if (Policy == 1) {      // append to EntryList
       
  1606          if (List == NULL) {
       
  1607              iterator->_next = iterator->_prev = NULL ;
       
  1608              _EntryList = iterator ;
       
  1609          } else {
       
  1610             // CONSIDER:  finding the tail currently requires a linear-time walk of
       
  1611             // the EntryList.  We can make tail access constant-time by converting to
       
  1612             // a CDLL instead of using our current DLL.
       
  1613             ObjectWaiter * Tail ;
       
  1614             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
       
  1615             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
       
  1616             Tail->_next = iterator ;
       
  1617             iterator->_prev = Tail ;
       
  1618             iterator->_next = NULL ;
       
  1619         }
       
  1620      } else
       
   1621      if (Policy == 2) {      // prepend to cxq
       
  1623          if (List == NULL) {
       
  1624              iterator->_next = iterator->_prev = NULL ;
       
  1625              _EntryList = iterator ;
       
  1626          } else {
       
  1627             iterator->TState = ObjectWaiter::TS_CXQ ;
       
  1628             for (;;) {
       
  1629                 ObjectWaiter * Front = _cxq ;
       
  1630                 iterator->_next = Front ;
       
  1631                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
       
  1632                     break ;
       
  1633                 }
       
  1634             }
       
  1635          }
       
  1636      } else
       
  1637      if (Policy == 3) {      // append to cxq
       
  1638         iterator->TState = ObjectWaiter::TS_CXQ ;
       
  1639         for (;;) {
       
  1640             ObjectWaiter * Tail ;
       
  1641             Tail = _cxq ;
       
  1642             if (Tail == NULL) {
       
  1643                 iterator->_next = NULL ;
       
  1644                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
       
  1645                    break ;
       
  1646                 }
       
  1647             } else {
       
  1648                 while (Tail->_next != NULL) Tail = Tail->_next ;
       
  1649                 Tail->_next = iterator ;
       
  1650                 iterator->_prev = Tail ;
       
  1651                 iterator->_next = NULL ;
       
  1652                 break ;
       
  1653             }
       
  1654         }
       
  1655      } else {
       
  1656         ParkEvent * ev = iterator->_event ;
       
  1657         iterator->TState = ObjectWaiter::TS_RUN ;
       
  1658         OrderAccess::fence() ;
       
  1659         ev->unpark() ;
       
  1660      }
       
  1661 
       
  1662      if (Policy < 4) {
       
  1663        iterator->wait_reenter_begin(this);
       
  1664      }
       
  1665 
       
  1666      // _WaitSetLock protects the wait queue, not the EntryList.  We could
       
  1667      // move the add-to-EntryList operation, above, outside the critical section
       
  1668      // protected by _WaitSetLock.  In practice that's not useful.  With the
       
  1669      // exception of  wait() timeouts and interrupts the monitor owner
       
  1670      // is the only thread that grabs _WaitSetLock.  There's almost no contention
       
  1671      // on _WaitSetLock so it's not profitable to reduce the length of the
       
  1672      // critical section.
       
  1673   }
       
  1674 
       
  1675   Thread::SpinRelease (&_WaitSetLock) ;
       
  1676 
       
  1677   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
       
  1678      ObjectMonitor::_sync_Notifications->inc() ;
       
  1679   }
       
  1680 }
       
  1681 
       
  1682 
       
  1683 void ObjectMonitor::notifyAll(TRAPS) {
       
  1684   CHECK_OWNER();
       
  1685   ObjectWaiter* iterator;
       
  1686   if (_WaitSet == NULL) {
       
  1687       TEVENT (Empty-NotifyAll) ;
       
  1688       return ;
       
  1689   }
       
  1690   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
       
  1691 
       
  1692   int Policy = Knob_MoveNotifyee ;
       
  1693   int Tally = 0 ;
       
  1694   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
       
  1695 
       
  1696   for (;;) {
       
  1697      iterator = DequeueWaiter () ;
       
  1698      if (iterator == NULL) break ;
       
  1699      TEVENT (NotifyAll - Transfer1) ;
       
  1700      ++Tally ;
       
  1701 
       
  1702      // Disposition - what might we do with iterator ?
       
  1703      // a.  add it directly to the EntryList - either tail or head.
       
  1704      // b.  push it onto the front of the _cxq.
       
  1705      // For now we use (a).
       
  1706 
       
  1707      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
       
  1708      guarantee (iterator->_notified == 0, "invariant") ;
       
  1709      iterator->_notified = 1 ;
       
  1710      if (Policy != 4) {
       
  1711         iterator->TState = ObjectWaiter::TS_ENTER ;
       
  1712      }
       
  1713 
       
  1714      ObjectWaiter * List = _EntryList ;
       
  1715      if (List != NULL) {
       
  1716         assert (List->_prev == NULL, "invariant") ;
       
  1717         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  1718         assert (List != iterator, "invariant") ;
       
  1719      }
       
  1720 
       
  1721      if (Policy == 0) {       // prepend to EntryList
       
  1722          if (List == NULL) {
       
  1723              iterator->_next = iterator->_prev = NULL ;
       
  1724              _EntryList = iterator ;
       
  1725          } else {
       
  1726              List->_prev = iterator ;
       
  1727              iterator->_next = List ;
       
  1728              iterator->_prev = NULL ;
       
  1729              _EntryList = iterator ;
       
  1730         }
       
  1731      } else
       
  1732      if (Policy == 1) {      // append to EntryList
       
  1733          if (List == NULL) {
       
  1734              iterator->_next = iterator->_prev = NULL ;
       
  1735              _EntryList = iterator ;
       
  1736          } else {
       
  1737             // CONSIDER:  finding the tail currently requires a linear-time walk of
       
  1738             // the EntryList.  We can make tail access constant-time by converting to
       
  1739             // a CDLL instead of using our current DLL.
       
  1740             ObjectWaiter * Tail ;
       
  1741             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
       
  1742             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
       
  1743             Tail->_next = iterator ;
       
  1744             iterator->_prev = Tail ;
       
  1745             iterator->_next = NULL ;
       
  1746         }
       
  1747      } else
       
   1748      if (Policy == 2) {      // prepend to cxq
       
  1750          iterator->TState = ObjectWaiter::TS_CXQ ;
       
  1751          for (;;) {
       
  1752              ObjectWaiter * Front = _cxq ;
       
  1753              iterator->_next = Front ;
       
  1754              if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
       
  1755                  break ;
       
  1756              }
       
  1757          }
       
  1758      } else
       
  1759      if (Policy == 3) {      // append to cxq
       
  1760         iterator->TState = ObjectWaiter::TS_CXQ ;
       
  1761         for (;;) {
       
  1762             ObjectWaiter * Tail ;
       
  1763             Tail = _cxq ;
       
  1764             if (Tail == NULL) {
       
  1765                 iterator->_next = NULL ;
       
  1766                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
       
  1767                    break ;
       
  1768                 }
       
  1769             } else {
       
  1770                 while (Tail->_next != NULL) Tail = Tail->_next ;
       
  1771                 Tail->_next = iterator ;
       
  1772                 iterator->_prev = Tail ;
       
  1773                 iterator->_next = NULL ;
       
  1774                 break ;
       
  1775             }
       
  1776         }
       
  1777      } else {
       
  1778         ParkEvent * ev = iterator->_event ;
       
  1779         iterator->TState = ObjectWaiter::TS_RUN ;
       
  1780         OrderAccess::fence() ;
       
  1781         ev->unpark() ;
       
  1782      }
       
  1783 
       
  1784      if (Policy < 4) {
       
  1785        iterator->wait_reenter_begin(this);
       
  1786      }
       
  1787 
       
  1788      // _WaitSetLock protects the wait queue, not the EntryList.  We could
       
  1789      // move the add-to-EntryList operation, above, outside the critical section
       
  1790      // protected by _WaitSetLock.  In practice that's not useful.  With the
       
  1791      // exception of  wait() timeouts and interrupts the monitor owner
       
  1792      // is the only thread that grabs _WaitSetLock.  There's almost no contention
       
  1793      // on _WaitSetLock so it's not profitable to reduce the length of the
       
  1794      // critical section.
       
  1795   }
       
  1796 
       
  1797   Thread::SpinRelease (&_WaitSetLock) ;
       
  1798 
       
  1799   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
       
  1800      ObjectMonitor::_sync_Notifications->inc(Tally) ;
       
  1801   }
       
  1802 }
       
  1803 
       
  1804 // -----------------------------------------------------------------------------
       
  1805 // Adaptive Spinning Support
       
  1806 //
       
  1807 // Adaptive spin-then-block - rational spinning
       
  1808 //
       
  1809 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
       
  1810 // algorithm.  On high order SMP systems it would be better to start with
       
  1811 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
       
  1812 // a contending thread could enqueue itself on the cxq and then spin locally
       
  1813 // on a thread-specific variable such as its ParkEvent._Event flag.
       
  1814 // That's left as an exercise for the reader.  Note that global spinning is
       
  1815 // not problematic on Niagara, as the L2$ serves the interconnect and has both
       
  1816 // low latency and massive bandwidth.
       
  1817 //
       
  1818 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
       
  1819 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
       
  1820 // (duration) or we can fix the count at approximately the duration of
       
  1821 // a context switch and vary the frequency.   Of course we could also
       
  1822 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
       
  1823 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
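// For instance, K = 1.0 * 5000 (always spin, for 5000 iterations) and
// K = 0.5 * 10000 (spin on half the attempts, for twice as long) describe
// the same aggregate spin budget for a monitor.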
       
  1824 //
       
  1825 // This implementation varies the duration "D", where D varies with
       
  1826 // the success rate of recent spin attempts. (D is capped at approximately
       
  1827 // length of a round-trip context switch).  The success rate for recent
       
  1828 // spin attempts is a good predictor of the success rate of future spin
       
  1829 // attempts.  The mechanism adapts automatically to varying critical
       
  1830 // section length (lock modality), system load and degree of parallelism.
       
  1831 // D is maintained per-monitor in _SpinDuration and is initialized
       
  1832 // optimistically.  Spin frequency is fixed at 100%.
       
  1833 //
       
  1834 // Note that _SpinDuration is volatile, but we update it without locks
       
  1835 // or atomics.  The code is designed so that _SpinDuration stays within
       
  1836 // a reasonable range even in the presence of races.  The arithmetic
       
  1837 // operations on _SpinDuration are closed over the domain of legal values,
       
   1838 // so at worst a race will install an older but still legal value.
       
  1839 // At the very worst this introduces some apparent non-determinism.
       
  1840 // We might spin when we shouldn't or vice-versa, but since the spin
       
   1841 // counts are relatively short, even in the worst case, the effect is harmless.
       
  1842 //
       
   1843 // Care must be taken that a low "D" value does not become
       
  1844 // an absorbing state.  Transient spinning failures -- when spinning
       
  1845 // is overall profitable -- should not cause the system to converge
       
  1846 // on low "D" values.  We want spinning to be stable and predictable
       
  1847 // and fairly responsive to change and at the same time we don't want
       
  1848 // it to oscillate, become metastable, be "too" non-deterministic,
       
  1849 // or converge on or enter undesirable stable absorbing states.
       
  1850 //
       
  1851 // We implement a feedback-based control system -- using past behavior
       
  1852 // to predict future behavior.  We face two issues: (a) if the
       
  1853 // input signal is random then the spin predictor won't provide optimal
       
  1854 // results, and (b) if the signal frequency is too high then the control
       
  1855 // system, which has some natural response lag, will "chase" the signal.
       
  1856 // (b) can arise from multimodal lock hold times.  Transient preemption
       
  1857 // can also result in apparent bimodal lock hold times.
       
  1858 // Although sub-optimal, neither condition is particularly harmful, as
       
  1859 // in the worst-case we'll spin when we shouldn't or vice-versa.
       
  1860 // The maximum spin duration is rather short so the failure modes aren't bad.
       
   1861 // To be conservative, I've tuned the gain in the system to bias toward
       
  1862 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
       
  1863 // "rings" or oscillates between spinning and not spinning.  This happens
       
  1864 // when spinning is just on the cusp of profitability, however, so the
       
  1865 // situation is not dire.  The state is benign -- there's no need to add
       
  1866 // hysteresis control to damp the transition rate between spinning and
       
  1867 // not spinning.
       
  1868 //
       
  1869 
       
  1870 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
       
  1871 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
       
  1872 
       
  1873 // Spinning: Fixed frequency (100%), vary duration
       
  1874 
       
  1875 
       
  1876 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
       
  1877 
       
  1878     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
       
  1879     int ctr = Knob_FixedSpin ;
       
  1880     if (ctr != 0) {
       
  1881         while (--ctr >= 0) {
       
  1882             if (TryLock (Self) > 0) return 1 ;
       
  1883             SpinPause () ;
       
  1884         }
       
  1885         return 0 ;
       
  1886     }
       
  1887 
       
  1888     for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
       
  1889       if (TryLock(Self) > 0) {
       
  1890         // Increase _SpinDuration ...
       
  1891         // Note that we don't clamp SpinDuration precisely at SpinLimit.
       
   1892         // Raising _SpinDuration to the poverty line is key.
       
  1893         int x = _SpinDuration ;
       
  1894         if (x < Knob_SpinLimit) {
       
  1895            if (x < Knob_Poverty) x = Knob_Poverty ;
       
  1896            _SpinDuration = x + Knob_BonusB ;
       
  1897         }
       
  1898         return 1 ;
       
  1899       }
       
  1900       SpinPause () ;
       
  1901     }
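    // Worked example, assuming hypothetical settings Knob_Poverty = 1000 and
    // Knob_BonusB = 100: a _SpinDuration that had decayed to 10 is first
    // lifted to the poverty line (1000) and then credited the bonus, leaving
    // _SpinDuration = 1100 after one successful pre-spin acquisition.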
       
  1902 
       
  1903     // Admission control - verify preconditions for spinning
       
  1904     //
       
  1905     // We always spin a little bit, just to prevent _SpinDuration == 0 from
       
  1906     // becoming an absorbing state.  Put another way, we spin briefly to
       
  1907     // sample, just in case the system load, parallelism, contention, or lock
       
  1908     // modality changed.
       
  1909     //
       
  1910     // Consider the following alternative:
       
  1911     // Periodically set _SpinDuration = _SpinLimit and try a long/full
       
  1912     // spin attempt.  "Periodically" might mean after a tally of
       
  1913     // the # of failed spin attempts (or iterations) reaches some threshold.
       
  1914     // This takes us into the realm of 1-out-of-N spinning, where we
       
  1915     // hold the duration constant but vary the frequency.
       
  1916 
       
  1917     ctr = _SpinDuration  ;
       
  1918     if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
       
  1919     if (ctr <= 0) return 0 ;
       
  1920 
       
  1921     if (Knob_SuccRestrict && _succ != NULL) return 0 ;
       
  1922     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
       
  1923        TEVENT (Spin abort - notrunnable [TOP]);
       
  1924        return 0 ;
       
  1925     }
       
  1926 
       
  1927     int MaxSpin = Knob_MaxSpinners ;
       
  1928     if (MaxSpin >= 0) {
       
  1929        if (_Spinner > MaxSpin) {
       
  1930           TEVENT (Spin abort -- too many spinners) ;
       
  1931           return 0 ;
       
  1932        }
       
   1933        // Slightly racy, but benign ...
       
  1934        Adjust (&_Spinner, 1) ;
       
  1935     }
       
  1936 
       
  1937     // We're good to spin ... spin ingress.
       
  1938     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
       
   1939     // when preparing to LD...CAS _owner, etc., and the CAS is likely
       
  1940     // to succeed.
       
  1941     int hits    = 0 ;
       
  1942     int msk     = 0 ;
       
  1943     int caspty  = Knob_CASPenalty ;
       
  1944     int oxpty   = Knob_OXPenalty ;
       
  1945     int sss     = Knob_SpinSetSucc ;
       
  1946     if (sss && _succ == NULL ) _succ = Self ;
       
  1947     Thread * prv = NULL ;
       
  1948 
       
  1949     // There are three ways to exit the following loop:
       
  1950     // 1.  A successful spin where this thread has acquired the lock.
       
  1951     // 2.  Spin failure with prejudice
       
  1952     // 3.  Spin failure without prejudice
       
  1953 
       
  1954     while (--ctr >= 0) {
       
  1955 
       
  1956       // Periodic polling -- Check for pending GC
       
  1957       // Threads may spin while they're unsafe.
       
  1958       // We don't want spinning threads to delay the JVM from reaching
       
  1959       // a stop-the-world safepoint or to steal cycles from GC.
       
  1960       // If we detect a pending safepoint we abort in order that
       
  1961       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
       
  1962       // this thread, if safe, doesn't steal cycles from GC.
       
  1963       // This is in keeping with the "no loitering in runtime" rule.
       
  1964       // We periodically check to see if there's a safepoint pending.
       
  1965       if ((ctr & 0xFF) == 0) {
       
  1966          if (SafepointSynchronize::do_call_back()) {
       
  1967             TEVENT (Spin: safepoint) ;
       
  1968             goto Abort ;           // abrupt spin egress
       
  1969          }
       
  1970          if (Knob_UsePause & 1) SpinPause () ;
       
  1971 
       
  1972          int (*scb)(intptr_t,int) = SpinCallbackFunction ;
       
  1973          if (hits > 50 && scb != NULL) {
       
  1974             int abend = (*scb)(SpinCallbackArgument, 0) ;
       
  1975          }
       
  1976       }
       
  1977 
       
  1978       if (Knob_UsePause & 2) SpinPause() ;
       
  1979 
       
  1980       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
       
  1981       // This is useful on classic SMP systems, but is of less utility on
       
  1982       // N1-style CMT platforms.
       
  1983       //
       
  1984       // Trade-off: lock acquisition latency vs coherency bandwidth.
       
  1985       // Lock hold times are typically short.  A histogram
       
  1986       // of successful spin attempts shows that we usually acquire
       
  1987       // the lock early in the spin.  That suggests we want to
       
  1988       // sample _owner frequently in the early phase of the spin,
       
  1989       // but then back-off and sample less frequently as the spin
       
  1990       // progresses.  The back-off makes a good citizen on SMP big
       
  1991       // SMP systems.  Oversampling _owner can consume excessive
       
  1992       // coherency bandwidth.  Relatedly, if we _oversample _owner we
       
  1993       // can inadvertently interfere with the the ST m->owner=null.
       
  1994       // executed by the lock owner.
       
  1995       if (ctr & msk) continue ;
       
  1996       ++hits ;
       
  1997       if ((hits & 0xF) == 0) {
       
  1998         // The 0xF, above, corresponds to the exponent.
       
  1999         // Consider: (msk+1)|msk
       
  2000         msk = ((msk << 2)|3) & BackOffMask ;
       
  2001       }
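      // For example, with Knob_SpinBackOff = 8, BackOffMask is
      // (1 << 8) - 1 = 0xFF and msk evolves 0 -> 3 -> 0xF -> 0x3F -> 0xFF,
      // where it saturates: _owner is probed on every iteration at first,
      // then 1-in-4, 1-in-16, and so on.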
       

      // Probe _owner with TATAS
      // If this thread observes the monitor transition or flicker
      // from locked to unlocked to locked, then the odds that this
      // thread will acquire the lock in this spin attempt go down
      // considerably.  The same argument applies if the CAS fails
      // or if we observe _owner change from one non-null value to
      // another non-null value.   In such cases we might abort
      // the spin without prejudice or apply a "penalty" to the
      // spin count-down variable "ctr", reducing it by 100, say.

      Thread * ox = (Thread *) _owner ;
      if (ox == NULL) {
         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
         if (ox == NULL) {
            // The CAS succeeded -- this thread acquired ownership
            // Take care of some bookkeeping to exit spin state.
            if (sss && _succ == Self) {
               _succ = NULL ;
            }
            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;

            // Increase _SpinDuration :
            // The spin was successful (profitable) so we tend toward
            // longer spin attempts in the future.
            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
            // If we acquired the lock early in the spin cycle it
            // makes sense to increase _SpinDuration proportionally.
            // Note that we don't clamp SpinDuration precisely at SpinLimit.
            int x = _SpinDuration ;
            if (x < Knob_SpinLimit) {
                if (x < Knob_Poverty) x = Knob_Poverty ;
                _SpinDuration = x + Knob_Bonus ;
            }
            return 1 ;
         }

         // The CAS failed ... we can take any of the following actions:
         // * penalize: ctr -= Knob_CASPenalty
         // * exit spin with prejudice -- goto Abort;
         // * exit spin without prejudice.
         // * Since CAS is high-latency, retry again immediately.
         prv = ox ;
         TEVENT (Spin: cas failed) ;
         if (caspty == -2) break ;
         if (caspty == -1) goto Abort ;
         ctr -= caspty ;
         continue ;
      }

      // Did lock ownership change hands ?
      if (ox != prv && prv != NULL ) {
          TEVENT (spin: Owner changed) ;
          if (oxpty == -2) break ;
          if (oxpty == -1) goto Abort ;
          ctr -= oxpty ;
      }
      prv = ox ;

      // Abort the spin if the owner is not executing.
      // The owner must be executing in order to drop the lock.
      // Spinning while the owner is OFFPROC is idiocy.
      // Consider: ctr -= RunnablePenalty ;
      if (Knob_OState && NotRunnable (Self, ox)) {
         TEVENT (Spin abort - notrunnable);
         goto Abort ;
      }
      if (sss && _succ == NULL ) _succ = Self ;
   }

   // Spin failed with prejudice -- reduce _SpinDuration.
   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
   // AIMD is globally stable.
   TEVENT (Spin failure) ;
   {
     int x = _SpinDuration ;
     if (x > 0) {
        // Consider an AIMD scheme like: x -= (x >> 3) + 100
        // This is globally stable and tends to damp the response.
        x -= Knob_Penalty ;
        if (x < 0) x = 0 ;
        _SpinDuration = x ;
     }
   }
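   // For example, under the AIMD decrement x -= (x >> 3) + 100, a duration
   // of 4000 decays to 3400, then 2875, and so on: the multiplicative term
   // shrinks large values quickly, while the additive Knob_Bonus increments
   // above recover slowly -- the classic AIMD stability argument.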
       

 Abort:
   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
   if (sss && _succ == Self) {
      _succ = NULL ;
      // Invariant: after setting succ=null a contending thread
      // must recheck-retry _owner before parking.  This usually happens
      // in the normal usage of TrySpin(), but it's safest
      // to make TrySpin() as foolproof as possible.
      OrderAccess::fence() ;
      if (TryLock(Self) > 0) return 1 ;
   }
   return 0 ;
}

// NotRunnable() -- informed spinning
//
// Don't bother spinning if the owner is not eligible to drop the lock.
// Peek at the owner's schedctl.sc_state and Thread._thread_state and
// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
// The thread must be runnable in order to drop the lock in timely fashion.
// If the _owner is not runnable then spinning will not likely be
// successful (profitable).
//
// Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
// Because of the lifecycle issues the schedctl and _thread_state values
// observed by NotRunnable() might be garbage.  NotRunnable must
// tolerate this and consider the observed _thread_state value
// as advisory.
//
// Beware too, that _owner is sometimes a BasicLock address and sometimes
// a thread pointer.  We differentiate the two cases with OwnerIsThread.
// Alternately, we might tag the type (thread pointer vs basiclock pointer)
// with the LSB of _owner.  Another option would be to probabilistically probe
// the putative _owner->TypeTag value.
//
// Checking _thread_state isn't perfect.  Even if the thread is
// in_java it might be blocked on a page-fault or have been preempted
// and sitting on a ready/dispatch queue.  _thread_state in conjunction
// with schedctl.sc_state gives us a good picture of what the
// thread is doing, however.
//
// TODO: check schedctl.sc_state.
// We'll need to use SafeFetch32() to read from the schedctl block.
// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
//
// The return value from NotRunnable() is *advisory* -- the
// result is based on sampling and is not necessarily coherent.
// The caller must tolerate false-negative and false-positive errors.
// Spinning, in general, is probabilistic anyway.


int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
    if (!OwnerIsThread) return 0 ;

    if (ox == NULL) return 0 ;

    // Avoid transitive spinning ...
    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
    // Immediately after T1 acquires L it's possible that T2, also
    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
    // This occurs transiently after T1 acquired L but before
    // T1 managed to clear T1._Stalled.  T2 does not need to abort
    // its spin in this circumstance.
    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;

    if (BlockedOn == 1) return 1 ;
    if (BlockedOn != 0) {
      return BlockedOn != intptr_t(this) && _owner == ox ;
    }

    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
    // consider also: jst != _thread_in_Java -- but that's overspecific.
    return jst == _thread_blocked || jst == _thread_in_native ;
}
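
// Conceptually SafeFetch32(adr, errval) behaves like the sketch below,
// except that a faulting load is intercepted by the VM's fault handler
// and converted into the error value instead of crashing.  (This is a
// simplified model of the mechanism, not its actual implementation.)
//
//   int SafeFetch32 (int * adr, int errval) {
//     return *adr ;     // on SIGSEGV/SIGBUS, yields errval instead
//   }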
       


// -----------------------------------------------------------------------------
// WaitSet management ...

ObjectWaiter::ObjectWaiter(Thread* thread) {
  _next     = NULL;
  _prev     = NULL;
  _notified = 0;
  TState    = TS_RUN ;
  _thread   = thread;
  _event    = thread->_ParkEvent ;
  _active   = false;
  assert (_event != NULL, "invariant") ;
}

void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
  JavaThread *jt = (JavaThread *)this->_thread;
  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
}

void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
  JavaThread *jt = (JavaThread *)this->_thread;
  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
}

inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not add NULL node");
  assert(node->_prev == NULL, "node already in list");
  assert(node->_next == NULL, "node already in list");
  // put node at end of queue (circular doubly linked list)
  if (_WaitSet == NULL) {
    _WaitSet = node;
    node->_prev = node;
    node->_next = node;
  } else {
    ObjectWaiter* head = _WaitSet ;
    ObjectWaiter* tail = head->_prev;
    assert(tail->_next == head, "invariant check");
    tail->_next = node;
    head->_prev = node;
    node->_next = head;
    node->_prev = tail;
  }
}

inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
  // dequeue the very first waiter
  ObjectWaiter* waiter = _WaitSet;
  if (waiter) {
    DequeueSpecificWaiter(waiter);
  }
  return waiter;
}

inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not dequeue NULL node");
  assert(node->_prev != NULL, "node already removed from list");
  assert(node->_next != NULL, "node already removed from list");
  // when the waiter has woken up because of interrupt,
  // timeout or other spurious wake-up, dequeue the
  // waiter from waiting list
  ObjectWaiter* next = node->_next;
  if (next == node) {
    assert(node->_prev == node, "invariant check");
    _WaitSet = NULL;
  } else {
    ObjectWaiter* prev = node->_prev;
    assert(prev->_next == node, "invariant check");
    assert(next->_prev == node, "invariant check");
    next->_prev = prev;
    prev->_next = next;
    if (_WaitSet == node) {
      _WaitSet = next;
    }
  }
  node->_next = NULL;
  node->_prev = NULL;
}
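
// For illustration, the circularity means the full WaitSet can be walked
// from the head as in the sketch below (a sketch, not code used in this
// file):
//
//   ObjectWaiter * p = _WaitSet ;
//   if (p != NULL) {
//     do {
//       // visit p ...
//       p = p->_next ;
//     } while (p != _WaitSet) ;   // stop once we wrap back to the head
//   }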
       

// -----------------------------------------------------------------------------
// PerfData support
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL ;
PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL ;
PerfCounter * ObjectMonitor::_sync_Parks                       = NULL ;
PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL ;
PerfCounter * ObjectMonitor::_sync_Notifications               = NULL ;
PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL ;
PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL ;
PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL ;
PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL ;
PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL ;
PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL ;
PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL ;
PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL ;
PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL ;

// One-shot global initialization for the sync subsystem.
// We could also defer initialization and initialize on-demand
// the first time we call inflate().  Initialization would
// be protected - like so many things - by the MonitorCache_lock.

void ObjectMonitor::Initialize () {
  static int InitializationCompleted = 0 ;
  assert (InitializationCompleted == 0, "invariant") ;
  InitializationCompleted = 1 ;
  if (UsePerfData) {
      EXCEPTION_MARK ;
      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
      NEWPERFCOUNTER(_sync_Inflations) ;
      NEWPERFCOUNTER(_sync_Deflations) ;
      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
      NEWPERFCOUNTER(_sync_FutileWakeups) ;
      NEWPERFCOUNTER(_sync_Parks) ;
      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
      NEWPERFCOUNTER(_sync_Notifications) ;
      NEWPERFCOUNTER(_sync_SlowEnter) ;
      NEWPERFCOUNTER(_sync_SlowExit) ;
      NEWPERFCOUNTER(_sync_SlowNotify) ;
      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
      NEWPERFCOUNTER(_sync_FailedSpins) ;
      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
      NEWPERFCOUNTER(_sync_PrivateA) ;
      NEWPERFCOUNTER(_sync_PrivateB) ;
      NEWPERFCOUNTER(_sync_MonInCirculation) ;
      NEWPERFCOUNTER(_sync_MonScavenged) ;
      NEWPERFVARIABLE(_sync_MonExtant) ;
      #undef NEWPERFCOUNTER
      #undef NEWPERFVARIABLE
  }
}
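
// These counters surface through the jvmstat/PerfData interface.  Assuming
// SUN_RT expands to the usual "sun.rt" counter namespace, _sync_Inflations,
// for example, would be visible to external monitoring tools as the counter
// "sun.rt._sync_Inflations".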
       


// Compile-time asserts
// When possible, it's better to catch errors deterministically at
// compile-time than at runtime.  The down-side to using compile-time
// asserts is that the error message -- often something about negative
// array indices -- is opaque.

#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }

void ObjectMonitor::ctAsserts() {
  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
}
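
// For example, CTASSERT(sizeof(int) == 4) declares tag[1] and compiles
// cleanly, while a false predicate declares tag[-1] -- a negative array
// bound the compiler must reject, surfacing the failure at build time.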
       


static char * kvGet (char * kvList, const char * Key) {
    if (kvList == NULL) return NULL ;
    size_t n = strlen (Key) ;
    char * Search ;
    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
        if (strncmp (Search, Key, n) == 0) {
            if (Search[n] == '=') return Search + n + 1 ;
            if (Search[n] == 0)   return (char *) "1" ;
        }
    }
    return NULL ;
}
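
// Example: given the double-NUL terminated list "SpinLimit=5000\0Verbose\0\0",
// kvGet(list, "SpinLimit") returns "5000" while kvGet(list, "Verbose")
// returns "1" -- a bare key acts as a boolean flag.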
       

static int kvGetInt (char * kvList, const char * Key, int Default) {
    char * v = kvGet (kvList, Key) ;
    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
    if (Knob_ReportSettings && v != NULL) {
        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
        ::fflush (stdout) ;
    }
    return rslt ;
}
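
// Note that strtol() with base 0 auto-detects the radix, so a knob value
// may be written in decimal ("5000"), octal ("010" == 8), or hex
// ("0x2000" == 8192).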
       

void ObjectMonitor::DeferredInitialize () {
  if (InitDone > 0) return ;
  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
      // Another thread is initializing -- spin until it finishes.
      while (InitDone != 1) ;
      return ;
  }

  // One-shot global initialization ...
  // The initialization is idempotent, so we don't need locks.
  // In the future consider doing this via os::init_2().
  // SyncKnobs consist of <Key>=<Value> pairs in the style
  // of environment variables.  Start by converting ':' to NUL.

  if (SyncKnobs == NULL) SyncKnobs = "" ;

  size_t sz = strlen (SyncKnobs) ;
  char * knobs = (char *) malloc (sz + 2) ;
  if (knobs == NULL) {
     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
     guarantee (0, "invariant") ;
  }
  strcpy (knobs, SyncKnobs) ;
  knobs[sz+1] = 0 ;     // double-NUL terminate the list for kvGet()'s scan
  for (char * p = knobs ; *p ; p++) {
     if (*p == ':') *p = 0 ;
  }

  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
  SETKNOB(ReportSettings) ;
  SETKNOB(Verbose) ;
  SETKNOB(FixedSpin) ;
  SETKNOB(SpinLimit) ;
  SETKNOB(SpinBase) ;
  SETKNOB(SpinBackOff);
  SETKNOB(CASPenalty) ;
  SETKNOB(OXPenalty) ;
  SETKNOB(LogSpins) ;
  SETKNOB(SpinSetSucc) ;
  SETKNOB(SuccEnabled) ;
  SETKNOB(SuccRestrict) ;
  SETKNOB(Penalty) ;
  SETKNOB(Bonus) ;
  SETKNOB(BonusB) ;
  SETKNOB(Poverty) ;
  SETKNOB(SpinAfterFutile) ;
  SETKNOB(UsePause) ;
  SETKNOB(SpinEarly) ;
  SETKNOB(OState) ;
  SETKNOB(MaxSpinners) ;
  SETKNOB(PreSpin) ;
  SETKNOB(ExitPolicy) ;
  SETKNOB(QMode);
  SETKNOB(ResetEvent) ;
  SETKNOB(MoveNotifyee) ;
  SETKNOB(FastHSSEC) ;
  #undef SETKNOB

  if (os::is_MP()) {
     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
  } else {
     // Spinning is pointless on uniprocessors -- disable it entirely.
     Knob_SpinLimit = 0 ;
     Knob_SpinBase  = 0 ;
     Knob_PreSpin   = 0 ;
     Knob_FixedSpin = -1 ;
  }

  if (Knob_LogSpins == 0) {
     ObjectMonitor::_sync_FailedSpins = NULL ;
  }

  free (knobs) ;
  OrderAccess::fence() ;
  InitDone = 1 ;
}
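
// Example usage (values illustrative): running with
//   -XX:SyncKnobs=SpinLimit=8192:SpinBackOff=6:ReportSettings=1
// raises the spin ceiling, widens the back-off mask to (1 << 6) - 1 = 0x3F,
// and echoes each parsed knob to stdout.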
       

#ifndef PRODUCT
void ObjectMonitor::verify() {
}

void ObjectMonitor::print() {
}
#endif