hotspot/src/share/vm/runtime/objectMonitor.cpp
changeset 26683 a02753d5a0b2
parent 25633 4cd9c4622c8c
child 26684 d1221849ea3d
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -45,7 +45,7 @@
 #include "utilities/preserveException.hpp"
 
 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
-  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+// Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define NOINLINE __attribute__((noinline))
 #else
   #define NOINLINE
@@ -254,11 +254,11 @@
 bool ObjectMonitor::try_enter(Thread* THREAD) {
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;
-       _recursions = 1;
-       OwnerIsThread = 1;
-       return true;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;
+      _recursions = 1;
+      OwnerIsThread = 1;
+      return true;
     }
     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
       return false;
@@ -277,17 +277,17 @@
 
   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   if (cur == NULL) {
-     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
-     assert(_recursions == 0   , "invariant");
-     assert(_owner      == Self, "invariant");
-     // CONSIDER: set or assert OwnerIsThread == 1
-     return;
+    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+    assert(_recursions == 0   , "invariant");
+    assert(_owner      == Self, "invariant");
+    // CONSIDER: set or assert OwnerIsThread == 1
+    return;
   }
 
   if (cur == Self) {
-     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
-     _recursions++;
-     return;
+    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
+    _recursions++;
+    return;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
@@ -310,11 +310,11 @@
   // Note that if we acquire the monitor from an initial spin
   // we forgo posting JVMTI events and firing DTRACE probes.
   if (Knob_SpinEarly && TrySpin (Self) > 0) {
-     assert(_owner == Self      , "invariant");
-     assert(_recursions == 0    , "invariant");
-     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-     Self->_Stalled = 0;
-     return;
+    assert(_owner == Self      , "invariant");
+    assert(_recursions == 0    , "invariant");
+    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+    Self->_Stalled = 0;
+    return;
   }
 
   assert(_owner != Self          , "invariant");
@@ -367,7 +367,7 @@
       // the monitor while suspended because that would surprise the
       // thread that suspended us.
       //
-          _recursions = 0;
+      _recursions = 0;
       _succ = NULL;
       exit(false, Self);
 
@@ -426,7 +426,7 @@
   }
 
   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
-     ObjectMonitor::_sync_ContendedLockAttempts->inc();
+    ObjectMonitor::_sync_ContendedLockAttempts->inc();
   }
 }
 
@@ -452,244 +452,244 @@
 }
 
 void NOINLINE ObjectMonitor::EnterI (TRAPS) {
-    Thread * const Self = THREAD;
-    assert(Self->is_Java_thread(), "invariant");
-    assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "invariant");
+  assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
+
+  // Try the lock - TATAS
+  if (TryLock (Self) > 0) {
+    assert(_succ != Self              , "invariant");
+    assert(_owner == Self             , "invariant");
+    assert(_Responsible != Self       , "invariant");
+    return;
+  }
+
+  DeferredInitialize();
 
-    // Try the lock - TATAS
-    if (TryLock (Self) > 0) {
-        assert(_succ != Self              , "invariant");
-        assert(_owner == Self             , "invariant");
-        assert(_Responsible != Self       , "invariant");
-        return;
-    }
+  // We try one round of spinning *before* enqueueing Self.
+  //
+  // If the _owner is ready but OFFPROC we could use a YieldTo()
+  // operation to donate the remainder of this thread's quantum
+  // to the owner.  This has subtle but beneficial affinity
+  // effects.
 
-    DeferredInitialize();
-
-    // We try one round of spinning *before* enqueueing Self.
-    //
-    // If the _owner is ready but OFFPROC we could use a YieldTo()
-    // operation to donate the remainder of this thread's quantum
-    // to the owner.  This has subtle but beneficial affinity
-    // effects.
+  if (TrySpin (Self) > 0) {
+    assert(_owner == Self        , "invariant");
+    assert(_succ != Self         , "invariant");
+    assert(_Responsible != Self  , "invariant");
+    return;
+  }
 
-    if (TrySpin (Self) > 0) {
-        assert(_owner == Self        , "invariant");
-        assert(_succ != Self         , "invariant");
-        assert(_Responsible != Self  , "invariant");
-        return;
-    }
+  // The Spin failed -- Enqueue and park the thread ...
+  assert(_succ  != Self            , "invariant");
+  assert(_owner != Self            , "invariant");
+  assert(_Responsible != Self      , "invariant");
 
-    // The Spin failed -- Enqueue and park the thread ...
-    assert(_succ  != Self            , "invariant");
-    assert(_owner != Self            , "invariant");
-    assert(_Responsible != Self      , "invariant");
+  // Enqueue "Self" on ObjectMonitor's _cxq.
+  //
+  // Node acts as a proxy for Self.
+  // As an aside, if we were ever to rewrite the synchronization code mostly
+  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+  // Java objects.  This would avoid awkward lifecycle and liveness issues,
+  // as well as eliminate a subset of ABA issues.
+  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+  //
 
-    // Enqueue "Self" on ObjectMonitor's _cxq.
-    //
-    // Node acts as a proxy for Self.
-    // As an aside, if were to ever rewrite the synchronization code mostly
-    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
-    // Java objects.  This would avoid awkward lifecycle and liveness issues,
-    // as well as eliminate a subset of ABA issues.
-    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
-    //
+  ObjectWaiter node(Self);
+  Self->_ParkEvent->reset();
+  node._prev   = (ObjectWaiter *) 0xBAD;
+  node.TState  = ObjectWaiter::TS_CXQ;
+
+  // Push "Self" onto the front of the _cxq.
+  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+  // Note that spinning tends to reduce the rate at which threads
+  // enqueue and dequeue on EntryList|cxq.
+  ObjectWaiter * nxt;
+  for (;;) {
+    node._next = nxt = _cxq;
+    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
 
-    ObjectWaiter node(Self);
-    Self->_ParkEvent->reset();
-    node._prev   = (ObjectWaiter *) 0xBAD;
-    node.TState  = ObjectWaiter::TS_CXQ;
-
-    // Push "Self" onto the front of the _cxq.
-    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
-    // Note that spinning tends to reduce the rate at which threads
-    // enqueue and dequeue on EntryList|cxq.
-    ObjectWaiter * nxt;
-    for (;;) {
-        node._next = nxt = _cxq;
-        if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
-
-        // Interference - the CAS failed because _cxq changed.  Just retry.
-        // As an optional optimization we retry the lock.
-        if (TryLock (Self) > 0) {
-            assert(_succ != Self         , "invariant");
-            assert(_owner == Self        , "invariant");
-            assert(_Responsible != Self  , "invariant");
-            return;
-        }
+    // Interference - the CAS failed because _cxq changed.  Just retry.
+    // As an optional optimization we retry the lock.
+    if (TryLock (Self) > 0) {
+      assert(_succ != Self         , "invariant");
+      assert(_owner == Self        , "invariant");
+      assert(_Responsible != Self  , "invariant");
+      return;
     }
+  }
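
The loop above is a standard lock-free prepend: read the head, point the new
node at it, and publish with a CAS, retrying on interference.  A stand-alone
sketch of the same pattern, using std::atomic rather than HotSpot's
Atomic::cmpxchg_ptr, with an illustrative Node type and push_front name:

#include <atomic>

struct Node {
  Node* next;
};

// Prepend 'n' to a lock-free, prepend-only list anchored at 'head'.
void push_front(std::atomic<Node*>& head, Node* n) {
  Node* old = head.load(std::memory_order_relaxed);
  do {
    n->next = old;            // point the new node at the head we last observed
  } while (!head.compare_exchange_weak(old, n,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
  // On failure compare_exchange_weak reloads 'old', so the loop simply
  // retries -- the "interference, just retry" case noted above.
}
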
 
-    // Check for cxq|EntryList edge transition to non-null.  This indicates
-    // the onset of contention.  While contention persists exiting threads
-    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
-    // operations revert to the faster 1-0 mode.  This enter operation may interleave
-    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
-    // arrange for one of the contending thread to use a timed park() operations
-    // to detect and recover from the race.  (Stranding is form of progress failure
-    // where the monitor is unlocked but all the contending threads remain parked).
-    // That is, at least one of the contended threads will periodically poll _owner.
-    // One of the contending threads will become the designated "Responsible" thread.
-    // The Responsible thread uses a timed park instead of a normal indefinite park
-    // operation -- it periodically wakes and checks for and recovers from potential
-    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
-    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
-    // be responsible for a monitor.
-    //
-    // Currently, one of the contended threads takes on the added role of "Responsible".
-    // A viable alternative would be to use a dedicated "stranding checker" thread
-    // that periodically iterated over all the threads (or active monitors) and unparked
-    // successors where there was risk of stranding.  This would help eliminate the
-    // timer scalability issues we see on some platforms as we'd only have one thread
-    // -- the checker -- parked on a timer.
+  // Check for cxq|EntryList edge transition to non-null.  This indicates
+  // the onset of contention.  While contention persists exiting threads
+  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+  // operations revert to the faster 1-0 mode.  This enter operation may interleave
+  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+  // arrange for one of the contending threads to use a timed park() operation
+  // to detect and recover from the race.  (Stranding is a form of progress failure
+  // where the monitor is unlocked but all the contending threads remain parked).
+  // That is, at least one of the contended threads will periodically poll _owner.
+  // One of the contending threads will become the designated "Responsible" thread.
+  // The Responsible thread uses a timed park instead of a normal indefinite park
+  // operation -- it periodically wakes and checks for and recovers from potential
+  // strandings admitted by 1-0 exit operations.   We need at most one Responsible
+  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
+  // be responsible for a monitor.
+  //
+  // Currently, one of the contended threads takes on the added role of "Responsible".
+  // A viable alternative would be to use a dedicated "stranding checker" thread
+  // that periodically iterated over all the threads (or active monitors) and unparked
+  // successors where there was risk of stranding.  This would help eliminate the
+  // timer scalability issues we see on some platforms as we'd only have one thread
+  // -- the checker -- parked on a timer.
 
-    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
-        // Try to assume the role of responsible thread for the monitor.
-        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-        Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+    // Try to assume the role of responsible thread for the monitor.
+    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
+    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  }
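
The comment block and the CAS above elect at most one "Responsible" thread per
monitor; later in the enter loop that thread parks with a timeout (the
RecheckInterval backoff) so a lost 1-0 wakeup cannot strand the whole queue.
A minimal stand-alone sketch of that poll-with-backoff idea, with
std::this_thread::sleep_for standing in for ParkEvent::park(millis) and
illustrative names (try_acquire, responsible_wait) throughout:

#include <atomic>
#include <chrono>
#include <thread>

static bool try_acquire(std::atomic<void*>& owner, void* self) {
  void* expected = nullptr;
  return owner.compare_exchange_strong(expected, self);
}

// The "Responsible" thread never parks indefinitely: it sleeps for a bounded
// interval, re-polls the owner word, and backs off up to a clamped maximum.
void responsible_wait(std::atomic<void*>& owner, void* self) {
  long recheck_ms = 1;
  while (!try_acquire(owner, self)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(recheck_ms));
    recheck_ms *= 8;                          // back off ...
    if (recheck_ms > 1000) recheck_ms = 1000; // ... but clamp the period
  }
}
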
+
+  // The lock might have been released while this thread was occupied queueing
+  // itself onto _cxq.  To close the race and avoid "stranding" and
+  // progress-liveness failure we must resample-retry _owner before parking.
+  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+  // In this case the ST-MEMBAR is accomplished with CAS().
+  //
+  // TODO: Defer all thread state transitions until park-time.
+  // Since state transitions are heavy and inefficient we'd like
+  // to defer the state transitions until absolutely necessary,
+  // and in doing so avoid some transitions ...
+
+  TEVENT(Inflated enter - Contention);
+  int nWakeups = 0;
+  int RecheckInterval = 1;
+
+  for (;;) {
+
+    if (TryLock(Self) > 0) break;
+    assert(_owner != Self, "invariant");
+
+    if ((SyncFlags & 2) && _Responsible == NULL) {
+      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
     }
 
-    // The lock might have been released while this thread was occupied queueing
-    // itself onto _cxq.  To close the race and avoid "stranding" and
-    // progress-liveness failure we must resample-retry _owner before parking.
-    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
-    // In this case the ST-MEMBAR is accomplished with CAS().
-    //
-    // TODO: Defer all thread state transitions until park-time.
-    // Since state transitions are heavy and inefficient we'd like
-    // to defer the state transitions until absolutely necessary,
-    // and in doing so avoid some transitions ...
-
-    TEVENT(Inflated enter - Contention);
-    int nWakeups = 0;
-    int RecheckInterval = 1;
-
-    for (;;) {
-
-        if (TryLock(Self) > 0) break;
-        assert(_owner != Self, "invariant");
-
-        if ((SyncFlags & 2) && _Responsible == NULL) {
-           Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
-        }
-
-        // park self
-        if (_Responsible == Self || (SyncFlags & 1)) {
-            TEVENT(Inflated enter - park TIMED);
-            Self->_ParkEvent->park((jlong) RecheckInterval);
-            // Increase the RecheckInterval, but clamp the value.
-            RecheckInterval *= 8;
-            if (RecheckInterval > 1000) RecheckInterval = 1000;
-        } else {
-            TEVENT(Inflated enter - park UNTIMED);
-            Self->_ParkEvent->park();
-        }
-
-        if (TryLock(Self) > 0) break;
-
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Inflated enter - Futile wakeup);
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-           ObjectMonitor::_sync_FutileWakeups->inc();
-        }
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
-        // We can defer clearing _succ until after the spin completes
-        // TrySpin() must tolerate being called with _succ == Self.
-        // Try yet another round of adaptive spinning.
-        if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
-
-        // We can find that we were unpark()ed and redesignated _succ while
-        // we were spinning.  That's harmless.  If we iterate and call park(),
-        // park() will consume the event and return immediately and we'll
-        // just spin again.  This pattern can repeat, leaving _succ to simply
-        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
-        // Alternately, we can sample fired() here, and if set, forgo spinning
-        // in the next iteration.
-
-        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
-           Self->_ParkEvent->reset();
-           OrderAccess::fence();
-        }
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
-        OrderAccess::fence();
+    // park self
+    if (_Responsible == Self || (SyncFlags & 1)) {
+      TEVENT(Inflated enter - park TIMED);
+      Self->_ParkEvent->park((jlong) RecheckInterval);
+      // Increase the RecheckInterval, but clamp the value.
+      RecheckInterval *= 8;
+      if (RecheckInterval > 1000) RecheckInterval = 1000;
+    } else {
+      TEVENT(Inflated enter - park UNTIMED);
+      Self->_ParkEvent->park();
     }
 
-    // Egress :
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
-    // Normally we'll find Self on the EntryList .
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Inflated enter - Futile wakeup);
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+    ++nWakeups;
 
-    assert(_owner == Self      , "invariant");
-    assert(object() != NULL    , "invariant");
-    // I'd like to write:
-    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-    // but as we're at a safepoint that's not safe.
+    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+    // We can defer clearing _succ until after the spin completes.
+    // TrySpin() must tolerate being called with _succ == Self.
+    // Try yet another round of adaptive spinning.
+    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 
-    UnlinkAfterAcquire(Self, &node);
+    // We can find that we were unpark()ed and redesignated _succ while
+    // we were spinning.  That's harmless.  If we iterate and call park(),
+    // park() will consume the event and return immediately and we'll
+    // just spin again.  This pattern can repeat, leaving _succ to simply
+    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
+    // Alternatively, we can sample fired() here, and if set, forgo spinning
+    // in the next iteration.
+
+    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+      Self->_ParkEvent->reset();
+      OrderAccess::fence();
+    }
     if (_succ == Self) _succ = NULL;
 
-    assert(_succ != Self, "invariant");
-    if (_Responsible == Self) {
-        _Responsible = NULL;
-        OrderAccess::fence(); // Dekker pivot-point
+    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+    OrderAccess::fence();
+  }
+
+  // Egress:
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+  // Normally we'll find Self on the EntryList.
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
 
-        // We may leave threads on cxq|EntryList without a designated
-        // "Responsible" thread.  This is benign.  When this thread subsequently
-        // exits the monitor it can "see" such preexisting "old" threads --
-        // threads that arrived on the cxq|EntryList before the fence, above --
-        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
-        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
-        // non-null and elect a new "Responsible" timer thread.
-        //
-        // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
-        //    LD cxq|EntryList               (in subsequent exit)
-        //
-        // Entering threads in the slow/contended path execute:
-        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
-        //    The (ST cxq; MEMBAR) is accomplished with CAS().
-        //
-        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
-        // exit operation from floating above the ST Responsible=null.
-    }
+  assert(_owner == Self      , "invariant");
+  assert(object() != NULL    , "invariant");
+  // I'd like to write:
+  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+  // but as we're at a safepoint that's not safe.
+
+  UnlinkAfterAcquire(Self, &node);
+  if (_succ == Self) _succ = NULL;
+
+  assert(_succ != Self, "invariant");
+  if (_Responsible == Self) {
+    _Responsible = NULL;
+    OrderAccess::fence(); // Dekker pivot-point
 
-    // We've acquired ownership with CAS().
-    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
-    // But since the CAS() this thread may have also stored into _succ,
-    // EntryList, cxq or Responsible.  These meta-data updates must be
-    // visible __before this thread subsequently drops the lock.
-    // Consider what could occur if we didn't enforce this constraint --
-    // STs to monitor meta-data and user-data could reorder with (become
-    // visible after) the ST in exit that drops ownership of the lock.
-    // Some other thread could then acquire the lock, but observe inconsistent
-    // or old monitor meta-data and heap data.  That violates the JMM.
-    // To that end, the 1-0 exit() operation must have at least STST|LDST
-    // "release" barrier semantics.  Specifically, there must be at least a
-    // STST|LDST barrier in exit() before the ST of null into _owner that drops
-    // the lock.   The barrier ensures that changes to monitor meta-data and data
-    // protected by the lock will be visible before we release the lock, and
-    // therefore before some other thread (CPU) has a chance to acquire the lock.
-    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+    // We may leave threads on cxq|EntryList without a designated
+    // "Responsible" thread.  This is benign.  When this thread subsequently
+    // exits the monitor it can "see" such preexisting "old" threads --
+    // threads that arrived on the cxq|EntryList before the fence, above --
+    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
+    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+    // non-null and elect a new "Responsible" timer thread.
+    //
+    // This thread executes:
+    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
+    //    LD cxq|EntryList               (in subsequent exit)
+    //
+    // Entering threads in the slow/contended path execute:
+    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+    //    The (ST cxq; MEMBAR) is accomplished with CAS().
     //
-    // Critically, any prior STs to _succ or EntryList must be visible before
-    // the ST of null into _owner in the *subsequent* (following) corresponding
-    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
-    // execute a serializing instruction.
+    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+    // exit operation from floating above the ST Responsible=null.
+  }
 
-    if (SyncFlags & 8) {
-       OrderAccess::fence();
-    }
-    return;
+  // We've acquired ownership with CAS().
+  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+  // But since the CAS() this thread may have also stored into _succ,
+  // EntryList, cxq or Responsible.  These meta-data updates must be
+  // visible __before this thread subsequently drops the lock.
+  // Consider what could occur if we didn't enforce this constraint --
+  // STs to monitor meta-data and user-data could reorder with (become
+  // visible after) the ST in exit that drops ownership of the lock.
+  // Some other thread could then acquire the lock, but observe inconsistent
+  // or old monitor meta-data and heap data.  That violates the JMM.
+  // To that end, the 1-0 exit() operation must have at least STST|LDST
+  // "release" barrier semantics.  Specifically, there must be at least a
+  // STST|LDST barrier in exit() before the ST of null into _owner that drops
+  // the lock.   The barrier ensures that changes to monitor meta-data and data
+  // protected by the lock will be visible before we release the lock, and
+  // therefore before some other thread (CPU) has a chance to acquire the lock.
+  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+  //
+  // Critically, any prior STs to _succ or EntryList must be visible before
+  // the ST of null into _owner in the *subsequent* (following) corresponding
+  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
+  // execute a serializing instruction.
+
+  if (SyncFlags & 8) {
+    OrderAccess::fence();
+  }
+  return;
 }
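
The contended-enter prologue and the 1-0 exit epilogue described in the
comments above form a Dekker/Lamport pair: each side stores to its own word,
executes a full barrier, then loads the other side's word, so at least one of
the two threads is guaranteed to observe the other.  A simplified stand-alone
sketch of that handshake with std::atomic (names and the explicit fences are
illustrative; in HotSpot the entering side gets its barrier from the CAS that
pushes onto _cxq):

#include <atomic>

std::atomic<void*> owner{nullptr};   // stands in for _owner
std::atomic<void*> cxq{nullptr};     // stands in for _cxq / _EntryList

// Contended enter prologue: publish ourselves, then re-sample the owner.
// Returns true if the lock turned out to be free and should be retried.
bool enter_side(void* node) {
  cxq.store(node);                                      // ST cxq
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MEMBAR
  return owner.load() == nullptr;                       // LD owner
}

// 1-0 exit epilogue: drop the lock, then re-sample the queue.
// Returns true if a successor must be woken.
bool exit_side() {
  owner.store(nullptr, std::memory_order_release);      // ST owner = null
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MEMBAR (storeload)
  return cxq.load() != nullptr;                         // LD cxq
}
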
 
 // ReenterI() is a specialized inline form of the latter half of the
@@ -701,91 +701,91 @@
 // loop accordingly.
 
 void NOINLINE ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
-    assert(Self != NULL                , "invariant");
-    assert(SelfNode != NULL            , "invariant");
-    assert(SelfNode->_thread == Self   , "invariant");
-    assert(_waiters > 0                , "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant");
-    assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
-    JavaThread * jt = (JavaThread *) Self;
+  assert(Self != NULL                , "invariant");
+  assert(SelfNode != NULL            , "invariant");
+  assert(SelfNode->_thread == Self   , "invariant");
+  assert(_waiters > 0                , "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant");
+  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
+  JavaThread * jt = (JavaThread *) Self;
 
-    int nWakeups = 0;
-    for (;;) {
-        ObjectWaiter::TStates v = SelfNode->TState;
-        guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-        assert(_owner != Self, "invariant");
-
-        if (TryLock(Self) > 0) break;
-        if (TrySpin(Self) > 0) break;
+  int nWakeups = 0;
+  for (;;) {
+    ObjectWaiter::TStates v = SelfNode->TState;
+    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+    assert(_owner != Self, "invariant");
 
-        TEVENT(Wait Reentry - parking);
+    if (TryLock(Self) > 0) break;
+    if (TrySpin(Self) > 0) break;
 
-        // State transition wrappers around park() ...
-        // ReenterI() wisely defers state transitions until
-        // it's clear we must park the thread.
-        {
-           OSThreadContendState osts(Self->osthread());
-           ThreadBlockInVM tbivm(jt);
+    TEVENT(Wait Reentry - parking);
 
-           // cleared by handle_special_suspend_equivalent_condition()
-           // or java_suspend_self()
-           jt->set_suspend_equivalent();
-           if (SyncFlags & 1) {
-              Self->_ParkEvent->park((jlong)1000);
-           } else {
-              Self->_ParkEvent->park();
-           }
-
-           // were we externally suspended while we were waiting?
-           for (;;) {
-              if (!ExitSuspendEquivalent(jt)) break;
-              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
-              jt->java_suspend_self();
-              jt->set_suspend_equivalent();
-           }
-        }
+    // State transition wrappers around park() ...
+    // ReenterI() wisely defers state transitions until
+    // it's clear we must park the thread.
+    {
+      OSThreadContendState osts(Self->osthread());
+      ThreadBlockInVM tbivm(jt);
 
-        // Try again, but just so we distinguish between futile wakeups and
-        // successful wakeups.  The following test isn't algorithmically
-        // necessary, but it helps us maintain sensible statistics.
-        if (TryLock(Self) > 0) break;
+      // cleared by handle_special_suspend_equivalent_condition()
+      // or java_suspend_self()
+      jt->set_suspend_equivalent();
+      if (SyncFlags & 1) {
+        Self->_ParkEvent->park((jlong)1000);
+      } else {
+        Self->_ParkEvent->park();
+      }
 
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Wait Reentry - futile wakeup);
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally
-        // find that _succ == Self.
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a contending thread
-        // *must* retry  _owner before parking.
-        OrderAccess::fence();
-
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-          ObjectMonitor::_sync_FutileWakeups->inc();
-        }
+      // were we externally suspended while we were waiting?
+      for (;;) {
+        if (!ExitSuspendEquivalent(jt)) break;
+        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+        jt->java_suspend_self();
+        jt->set_suspend_equivalent();
+      }
     }
 
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
-    // Normally we'll find Self on the EntryList.
-    // Unlinking from the EntryList is constant-time and atomic-free.
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    // Try again, but just so we distinguish between futile wakeups and
+    // successful wakeups.  The following test isn't algorithmically
+    // necessary, but it helps us maintain sensible statistics.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Wait Reentry - futile wakeup);
+    ++nWakeups;
+
+    // Assuming this is not a spurious wakeup we'll normally
+    // find that _succ == Self.
+    if (_succ == Self) _succ = NULL;
+
+    // Invariant: after clearing _succ a contending thread
+    // *must* retry  _owner before parking.
+    OrderAccess::fence();
 
-    assert(_owner == Self, "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-    UnlinkAfterAcquire(Self, SelfNode);
-    if (_succ == Self) _succ = NULL;
-    assert(_succ != Self, "invariant");
-    SelfNode->TState = ObjectWaiter::TS_RUN;
-    OrderAccess::fence();      // see comments at the end of EnterI()
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+  }
+
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+  // Normally we'll find Self on the EntryList.
+  // Unlinking from the EntryList is constant-time and atomic-free.
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
+
+  assert(_owner == Self, "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  UnlinkAfterAcquire(Self, SelfNode);
+  if (_succ == Self) _succ = NULL;
+  assert(_succ != Self, "invariant");
+  SelfNode->TState = ObjectWaiter::TS_RUN;
+  OrderAccess::fence();      // see comments at the end of EnterI()
 }
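
ReenterI's wakeup loop above tolerates futile and spurious wakeups: retry the
lock, park, retry again, bump a deliberately lossy counter, and after clearing
the _succ hint, fence and re-test the owner before parking once more.  A
compact stand-alone sketch of that loop, with sleep_for standing in for
ParkEvent::park() and illustrative names (MiniMonitor, futile_wakeups):

#include <atomic>
#include <chrono>
#include <thread>

struct MiniMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> succ{nullptr};
  int futile_wakeups = 0;             // lossy by design: no lock, no atomics

  bool try_lock(void* self) {
    void* expected = nullptr;
    return owner.compare_exchange_strong(expected, self);
  }

  void reenter(void* self) {
    for (;;) {
      if (try_lock(self)) break;
      std::this_thread::sleep_for(std::chrono::milliseconds(1));  // "park"
      if (try_lock(self)) break;      // distinguish futile from useful wakeups
      ++futile_wakeups;
      void* expected = self;
      succ.compare_exchange_strong(expected, nullptr);  // drop the heir hint
      std::atomic_thread_fence(std::memory_order_seq_cst);
      // Invariant: the owner word is re-tested (loop top) before parking again.
    }
  }
};
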
 
 // By convention we unlink a contending thread from EntryList|cxq immediately
@@ -794,66 +794,66 @@
 
 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
 {
-    assert(_owner == Self, "invariant");
-    assert(SelfNode->_thread == Self, "invariant");
+  assert(_owner == Self, "invariant");
+  assert(SelfNode->_thread == Self, "invariant");
 
-    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
-        // Normal case: remove Self from the DLL EntryList .
-        // This is a constant-time operation.
-        ObjectWaiter * nxt = SelfNode->_next;
-        ObjectWaiter * prv = SelfNode->_prev;
-        if (nxt != NULL) nxt->_prev = prv;
-        if (prv != NULL) prv->_next = nxt;
-        if (SelfNode == _EntryList) _EntryList = nxt;
-        assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
-        TEVENT(Unlink from EntryList);
-    } else {
-        assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
-        // Inopportune interleaving -- Self is still on the cxq.
-        // This usually means the enqueue of self raced an exiting thread.
-        // Normally we'll find Self near the front of the cxq, so
-        // dequeueing is typically fast.  If needbe we can accelerate
-        // this with some MCS/CHL-like bidirectional list hints and advisory
-        // back-links so dequeueing from the interior will normally operate
-        // in constant-time.
-        // Dequeue Self from either the head (with CAS) or from the interior
-        // with a linear-time scan and normal non-atomic memory operations.
-        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
-        // and then unlink Self from EntryList.  We have to drain eventually,
-        // so it might as well be now.
+  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+    // Normal case: remove Self from the DLL EntryList.
+    // This is a constant-time operation.
+    ObjectWaiter * nxt = SelfNode->_next;
+    ObjectWaiter * prv = SelfNode->_prev;
+    if (nxt != NULL) nxt->_prev = prv;
+    if (prv != NULL) prv->_next = nxt;
+    if (SelfNode == _EntryList) _EntryList = nxt;
+    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
+    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
+    TEVENT(Unlink from EntryList);
+  } else {
+    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
+    // Inopportune interleaving -- Self is still on the cxq.
+    // This usually means the enqueue of self raced an exiting thread.
+    // Normally we'll find Self near the front of the cxq, so
+    // dequeueing is typically fast.  If need be we can accelerate
+    // this with some MCS/CHL-like bidirectional list hints and advisory
+    // back-links so dequeueing from the interior will normally operate
+    // in constant-time.
+    // Dequeue Self from either the head (with CAS) or from the interior
+    // with a linear-time scan and normal non-atomic memory operations.
+    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+    // and then unlink Self from EntryList.  We have to drain eventually,
+    // so it might as well be now.
 
-        ObjectWaiter * v = _cxq;
-        assert(v != NULL, "invariant");
-        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
-            // The CAS above can fail from interference IFF a "RAT" arrived.
-            // In that case Self must be in the interior and can no longer be
-            // at the head of cxq.
-            if (v == SelfNode) {
-                assert(_cxq != v, "invariant");
-                v = _cxq;          // CAS above failed - start scan at head of list
-            }
-            ObjectWaiter * p;
-            ObjectWaiter * q = NULL;
-            for (p = v; p != NULL && p != SelfNode; p = p->_next) {
-                q = p;
-                assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
-            }
-            assert(v != SelfNode, "invariant");
-            assert(p == SelfNode, "Node not found on cxq");
-            assert(p != _cxq, "invariant");
-            assert(q != NULL, "invariant");
-            assert(q->_next == p, "invariant");
-            q->_next = p->_next;
-        }
-        TEVENT(Unlink from cxq);
+    ObjectWaiter * v = _cxq;
+    assert(v != NULL, "invariant");
+    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+      // The CAS above can fail from interference IFF a "RAT" arrived.
+      // In that case Self must be in the interior and can no longer be
+      // at the head of cxq.
+      if (v == SelfNode) {
+        assert(_cxq != v, "invariant");
+        v = _cxq;          // CAS above failed - start scan at head of list
+      }
+      ObjectWaiter * p;
+      ObjectWaiter * q = NULL;
+      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
+        q = p;
+        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
+      }
+      assert(v != SelfNode, "invariant");
+      assert(p == SelfNode, "Node not found on cxq");
+      assert(p != _cxq, "invariant");
+      assert(q != NULL, "invariant");
+      assert(q->_next == p, "invariant");
+      q->_next = p->_next;
     }
+    TEVENT(Unlink from cxq);
+  }
 
 #ifdef ASSERT
-    // Diagnostic hygiene ...
-    SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
-    SelfNode->_next  = (ObjectWaiter *) 0xBAD;
-    SelfNode->TState = ObjectWaiter::TS_RUN;
+  // Diagnostic hygiene ...
+  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
+  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
+  SelfNode->TState = ObjectWaiter::TS_RUN;
 #endif
 }
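
UnlinkAfterAcquire dequeues from cxq either at the head, with a single CAS, or
from the interior, with a plain predecessor scan -- which is safe only because
the caller owns the lock, so the interior of cxq is stable and only the head
can change concurrently.  A simplified sketch of that two-path unlink, using
std::atomic in place of Atomic::cmpxchg_ptr and an illustrative WNode type:

#include <atomic>
#include <cassert>

struct WNode {
  WNode* next = nullptr;
};

void unlink_from_cxq(std::atomic<WNode*>& cxq, WNode* self) {
  WNode* head = cxq.load();
  // Fast path: self is still the head -- detach it with one CAS.
  if (head == self && cxq.compare_exchange_strong(head, self->next)) {
    return;
  }
  // Slow path: a later arrival was pushed in front of us, so self is in the
  // interior.  Scan for the predecessor; ordinary stores suffice here.
  WNode* prev = cxq.load();
  while (prev != nullptr && prev->next != self) {
    prev = prev->next;
  }
  assert(prev != nullptr && "node not found on cxq");
  prev->next = self->next;
}
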
 
@@ -915,331 +915,331 @@
 // of such futile wakups is low.
 
 void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
-   Thread * const Self = THREAD;
-   if (THREAD != _owner) {
-     if (THREAD->is_lock_owned((address) _owner)) {
-       // Transmute _owner from a BasicLock pointer to a Thread address.
-       // We don't need to hold _mutex for this transition.
-       // Non-null to Non-null is safe as long as all readers can
-       // tolerate either flavor.
-       assert(_recursions == 0, "invariant");
-       _owner = THREAD;
-       _recursions = 0;
-       OwnerIsThread = 1;
-     } else {
-       // Apparent unbalanced locking ...
-       // Naively we'd like to throw IllegalMonitorStateException.
-       // As a practical matter we can neither allocate nor throw an
-       // exception as ::exit() can be called from leaf routines.
-       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
-       // Upon deeper reflection, however, in a properly run JVM the only
-       // way we should encounter this situation is in the presence of
-       // unbalanced JNI locking. TODO: CheckJNICalls.
-       // See also: CR4414101
-       TEVENT(Exit - Throw IMSX);
-       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
-       return;
-     }
-   }
+  Thread * const Self = THREAD;
+  if (THREAD != _owner) {
+    if (THREAD->is_lock_owned((address) _owner)) {
+      // Transmute _owner from a BasicLock pointer to a Thread address.
+      // We don't need to hold _mutex for this transition.
+      // Non-null to Non-null is safe as long as all readers can
+      // tolerate either flavor.
+      assert(_recursions == 0, "invariant");
+      _owner = THREAD;
+      _recursions = 0;
+      OwnerIsThread = 1;
+    } else {
+      // Apparent unbalanced locking ...
+      // Naively we'd like to throw IllegalMonitorStateException.
+      // As a practical matter we can neither allocate nor throw an
+      // exception as ::exit() can be called from leaf routines.
+      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+      // Upon deeper reflection, however, in a properly run JVM the only
+      // way we should encounter this situation is in the presence of
+      // unbalanced JNI locking. TODO: CheckJNICalls.
+      // See also: CR4414101
+      TEVENT(Exit - Throw IMSX);
+      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
+      return;
+    }
+  }
 
-   if (_recursions != 0) {
-     _recursions--;        // this is simple recursive enter
-     TEVENT(Inflated exit - recursive);
-     return;
-   }
+  if (_recursions != 0) {
+    _recursions--;        // this is simple recursive enter
+    TEVENT(Inflated exit - recursive);
+    return;
+  }
 
-   // Invariant: after setting Responsible=null an thread must execute
-   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
+  // Invariant: after setting Responsible=null a thread must execute
+  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
 
 #if INCLUDE_TRACE
-   // get the owner's thread id for the MonitorEnter event
-   // if it is enabled and the thread isn't suspended
-   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
-     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
-   }
+  // get the owner's thread id for the MonitorEnter event
+  // if it is enabled and the thread isn't suspended
+  if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
+    _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+  }
 #endif
 
-   for (;;) {
-      assert(THREAD == _owner, "invariant");
+  for (;;) {
+    assert(THREAD == _owner, "invariant");
 
 
-      if (Knob_ExitPolicy == 0) {
-         // release semantics: prior loads and stores from within the critical section
-         // must not float (reorder) past the following store that drops the lock.
-         // On SPARC that requires MEMBAR #loadstore|#storestore.
-         // But of course in TSO #loadstore|#storestore is not required.
-         // I'd like to write one of the following:
-         // A.  OrderAccess::release() ; _owner = NULL
-         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
-         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
-         // store into a _dummy variable.  That store is not needed, but can result
-         // in massive wasteful coherency traffic on classic SMP systems.
-         // Instead, I use release_store(), which is implemented as just a simple
-         // ST on x64, x86 and SPARC.
-         OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-         OrderAccess::storeload();                         // See if we need to wake a successor
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            TEVENT(Inflated exit - simple egress);
-            return;
-         }
-         TEVENT(Inflated exit - complex egress);
-         // Other threads are blocked trying to acquire the lock.
+    if (Knob_ExitPolicy == 0) {
+      // release semantics: prior loads and stores from within the critical section
+      // must not float (reorder) past the following store that drops the lock.
+      // On SPARC that requires MEMBAR #loadstore|#storestore.
+      // But of course in TSO #loadstore|#storestore is not required.
+      // I'd like to write one of the following:
+      // A.  OrderAccess::release() ; _owner = NULL
+      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+      // store into a _dummy variable.  That store is not needed, but can result
+      // in massive wasteful coherency traffic on classic SMP systems.
+      // Instead, I use release_store(), which is implemented as just a simple
+      // ST on x64, x86 and SPARC.
+      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+      OrderAccess::storeload();                         // See if we need to wake a successor
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        TEVENT(Inflated exit - simple egress);
+        return;
+      }
+      TEVENT(Inflated exit - complex egress);
+      // Other threads are blocked trying to acquire the lock.
+
+      // Normally the exiting thread is responsible for ensuring succession,
+      // but if other successors are ready or other entering threads are spinning
+      // then this thread can simply store NULL into _owner and exit without
+      // waking a successor.  The existence of spinners or ready successors
+      // guarantees proper succession (liveness).  Responsibility passes to the
+      // ready or running successors.  The exiting thread delegates the duty.
+      // More precisely, if a successor already exists this thread is absolved
+      // of the responsibility of waking (unparking) one.
+      //
+      // The _succ variable is critical to reducing futile wakeup frequency.
+      // _succ identifies the "heir presumptive" thread that has been made
+      // ready (unparked) but that has not yet run.  We need only one such
+      // successor thread to guarantee progress.
+      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+      // section 3.3 "Futile Wakeup Throttling" for details.
+      //
+      // Note that spinners in Enter() also set _succ non-null.
+      // In the current implementation spinners opportunistically set
+      // _succ so that exiting threads might avoid waking a successor.
+      // Another less appealing alternative would be for the exiting thread
+      // to drop the lock and then spin briefly to see if a spinner managed
+      // to acquire the lock.  If so, the exiting thread could exit
+      // immediately without waking a successor, otherwise the exiting
+      // thread would need to dequeue and wake a successor.
+      // (Note that we'd need to make the post-drop spin short, but no
+      // shorter than the worst-case round-trip cache-line migration time.
+      // The dropped lock needs to become visible to the spinner, and then
+      // the acquisition of the lock by the spinner must become visible to
+      // the exiting thread).
+      //
 
-         // Normally the exiting thread is responsible for ensuring succession,
-         // but if other successors are ready or other entering threads are spinning
-         // then this thread can simply store NULL into _owner and exit without
-         // waking a successor.  The existence of spinners or ready successors
-         // guarantees proper succession (liveness).  Responsibility passes to the
-         // ready or running successors.  The exiting thread delegates the duty.
-         // More precisely, if a successor already exists this thread is absolved
-         // of the responsibility of waking (unparking) one.
-         //
-         // The _succ variable is critical to reducing futile wakeup frequency.
-         // _succ identifies the "heir presumptive" thread that has been made
-         // ready (unparked) but that has not yet run.  We need only one such
-         // successor thread to guarantee progress.
-         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
-         // section 3.3 "Futile Wakeup Throttling" for details.
-         //
-         // Note that spinners in Enter() also set _succ non-null.
-         // In the current implementation spinners opportunistically set
-         // _succ so that exiting threads might avoid waking a successor.
-         // Another less appealing alternative would be for the exiting thread
-         // to drop the lock and then spin briefly to see if a spinner managed
-         // to acquire the lock.  If so, the exiting thread could exit
-         // immediately without waking a successor, otherwise the exiting
-         // thread would need to dequeue and wake a successor.
-         // (Note that we'd need to make the post-drop spin short, but no
-         // shorter than the worst-case round-trip cache-line migration time.
-         // The dropped lock needs to become visible to the spinner, and then
-         // the acquisition of the lock by the spinner must become visible to
-         // the exiting thread).
-         //
+      // It appears that an heir-presumptive (successor) must be made ready.
+      // Only the current lock owner can manipulate the EntryList or
+      // drain _cxq, so we need to reacquire the lock.  If we fail
+      // to reacquire the lock the responsibility for ensuring succession
+      // falls to the new owner.
+      //
+      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+        return;
+      }
+      TEVENT(Exit - Reacquired);
+    } else {
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+        OrderAccess::storeload();
+        // Ratify the previously observed values.
+        if (_cxq == NULL || _succ != NULL) {
+          TEVENT(Inflated exit - simple egress);
+          return;
+        }
 
-         // It appears that an heir-presumptive (successor) must be made ready.
-         // Only the current lock owner can manipulate the EntryList or
-         // drain _cxq, so we need to reacquire the lock.  If we fail
-         // to reacquire the lock the responsibility for ensuring succession
-         // falls to the new owner.
-         //
-         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-            return;
-         }
-         TEVENT(Exit - Reacquired);
+        // inopportune interleaving -- the exiting thread (this thread)
+        // in the fast-exit path raced an entering thread in the slow-enter
+        // path.
+        // We have two choices:
+        // A.  Try to reacquire the lock.
+        //     If the CAS() fails return immediately, otherwise
+        //     we either restart/rerun the exit operation, or simply
+        //     fall-through into the code below which wakes a successor.
+        // B.  If the elements forming the EntryList|cxq are TSM
+        //     we could simply unpark() the lead thread and return
+        //     without having set _succ.
+        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+          TEVENT(Inflated exit - reacquired succeeded);
+          return;
+        }
+        TEVENT(Inflated exit - reacquired failed);
       } else {
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-            OrderAccess::storeload();
-            // Ratify the previously observed values.
-            if (_cxq == NULL || _succ != NULL) {
-                TEVENT(Inflated exit - simple egress);
-                return;
-            }
+        TEVENT(Inflated exit - complex egress);
+      }
+    }
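
The Knob_ExitPolicy == 0 branch above is the 1-0 exit fast path: a release
store drops the lock, a store-load barrier orders the queue re-check, and the
exiting thread re-acquires (via CAS) only when a successor actually needs
waking.  A stand-alone sketch of that decision, with std::atomic standing in
for OrderAccess/Atomic and illustrative names (MiniExit, drop_lock):

#include <atomic>

struct MiniExit {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> entry_list{nullptr};
  std::atomic<void*> cxq{nullptr};
  std::atomic<void*> succ{nullptr};

  // Returns true if the caller went on to reacquire the lock and must now
  // pick and unpark a successor; false if it can simply return.
  bool drop_lock(void* self) {
    owner.store(nullptr, std::memory_order_release);     // 1-0: drop the lock
    std::atomic_thread_fence(std::memory_order_seq_cst); // storeload barrier
    if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
        succ.load() != nullptr) {
      return false;   // nobody is queued, or an heir presumptive is already awake
    }
    // Succession is our duty only if we can get the lock back; otherwise the
    // new owner inherits it.
    void* expected = nullptr;
    return owner.compare_exchange_strong(expected, self);
  }
};
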
+
+    guarantee(_owner == THREAD, "invariant");
+
+    ObjectWaiter * w = NULL;
+    int QMode = Knob_QMode;
 
-            // inopportune interleaving -- the exiting thread (this thread)
-            // in the fast-exit path raced an entering thread in the slow-enter
-            // path.
-            // We have two choices:
-            // A.  Try to reacquire the lock.
-            //     If the CAS() fails return immediately, otherwise
-            //     we either restart/rerun the exit operation, or simply
-            //     fall-through into the code below which wakes a successor.
-            // B.  If the elements forming the EntryList|cxq are TSM
-            //     we could simply unpark() the lead thread and return
-            //     without having set _succ.
-            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-               TEVENT(Inflated exit - reacquired succeeded);
-               return;
-            }
-            TEVENT(Inflated exit - reacquired failed);
-         } else {
-            TEVENT(Inflated exit - complex egress);
-         }
+    if (QMode == 2 && _cxq != NULL) {
+      // QMode == 2 : cxq has precedence over EntryList.
+      // Try to directly wake a successor from the cxq.
+      // If successful, the successor will need to unlink itself from cxq.
+      w = _cxq;
+      assert(w != NULL, "invariant");
+      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    if (QMode == 3 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
+      // Drain _cxq into EntryList - bulk transfer.
+      // First, detach _cxq.
+      // The following loop is tantamount to: w = swap (&cxq, NULL)
+      w = _cxq;
+      for (;;) {
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
+      }
+      assert(w != NULL              , "invariant");
+
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      guarantee(_owner == THREAD, "invariant");
-
-      ObjectWaiter * w = NULL;
-      int QMode = Knob_QMode;
-
-      if (QMode == 2 && _cxq != NULL) {
-          // QMode == 2 : cxq has precedence over EntryList.
-          // Try to directly wake a successor from the cxq.
-          // If successful, the successor will need to unlink itself from cxq.
-          w = _cxq;
-          assert(w != NULL, "invariant");
-          assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      if (QMode == 3 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
-
-          // Append the RATs to the EntryList
-          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
-          ObjectWaiter * Tail;
-          for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL; Tail = Tail->_next);
-          if (Tail == NULL) {
-              _EntryList = w;
-          } else {
-              Tail->_next = w;
-              w->_prev = Tail;
-          }
-
-          // Fall thru into code that tries to wake a successor from EntryList
+      // Append the RATs to the EntryList
+      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+      ObjectWaiter * Tail;
+      for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL; Tail = Tail->_next);
+      if (Tail == NULL) {
+        _EntryList = w;
+      } else {
+        Tail->_next = w;
+        w->_prev = Tail;
       }
 
-      if (QMode == 4 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-          // Prepend the RATs to the EntryList
-          if (_EntryList != NULL) {
-              q->_next = _EntryList;
-              _EntryList->_prev = q;
-          }
-          _EntryList = w;
-
-          // Fall thru into code that tries to wake a successor from EntryList
-      }
-
-      w = _EntryList;
-      if (w != NULL) {
-          // I'd like to write: guarantee (w->_thread != Self).
-          // But in practice an exiting thread may find itself on the EntryList.
-          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
-          // then calls exit().  Exit release the lock by setting O._owner to NULL.
-          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
-          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
-          // release the lock "O".  T2 resumes immediately after the ST of null into
-          // _owner, above.  T2 notices that the EntryList is populated, so it
-          // reacquires the lock and then finds itself on the EntryList.
-          // Given all that, we have to tolerate the circumstance where "w" is
-          // associated with Self.
-          assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      // If we find that both _cxq and EntryList are null then just
-      // re-run the exit protocol from the top.
-      w = _cxq;
-      if (w == NULL) continue;
+    if (QMode == 4 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
 
       // Drain _cxq into EntryList - bulk transfer.
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap (&cxq, NULL)
+      w = _cxq;
       for (;;) {
-          assert(w != NULL, "Invariant");
-          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-          if (u == w) break;
-          w = u;
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
       }
-      TEVENT(Inflated exit - drain cxq into EntryList);
-
       assert(w != NULL              , "invariant");
-      assert(_EntryList  == NULL    , "invariant");
-
-      // Convert the LIFO SLL anchored by _cxq into a DLL.
-      // The list reorganization step operates in O(LENGTH(w)) time.
-      // It's critical that this step operate quickly as
-      // "Self" still holds the outer-lock, restricting parallelism
-      // and effectively lengthening the critical section.
-      // Invariant: s chases t chases u.
-      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
-      // we have faster access to the tail.
 
-      if (QMode == 1) {
-         // QMode == 1 : drain cxq to EntryList, reversing order
-         // We also reverse the order of the list.
-         ObjectWaiter * s = NULL;
-         ObjectWaiter * t = w;
-         ObjectWaiter * u = NULL;
-         while (t != NULL) {
-             guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
-             t->TState = ObjectWaiter::TS_ENTER;
-             u = t->_next;
-             t->_prev = u;
-             t->_next = s;
-             s = t;
-             t = u;
-         }
-         _EntryList  = s;
-         assert(s != NULL, "invariant");
-      } else {
-         // QMode == 0 or QMode == 2
-         _EntryList = w;
-         ObjectWaiter * q = NULL;
-         ObjectWaiter * p;
-         for (p = w; p != NULL; p = p->_next) {
-             guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-             p->TState = ObjectWaiter::TS_ENTER;
-             p->_prev = q;
-             q = p;
-         }
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
-      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+      // Prepend the RATs to the EntryList
+      if (_EntryList != NULL) {
+        q->_next = _EntryList;
+        _EntryList->_prev = q;
+      }
+      _EntryList = w;
+
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-      // See if we can abdicate to a spinner instead of waking a thread.
-      // A primary goal of the implementation is to reduce the
-      // context-switch rate.
-      if (_succ != NULL) continue;
+    w = _EntryList;
+    if (w != NULL) {
+      // I'd like to write: guarantee (w->_thread != Self).
+      // But in practice an exiting thread may find itself on the EntryList.
+      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
+      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
+      // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
+      // releases the lock "O".  T2 resumes immediately after the ST of null into
+      // _owner, above.  T2 notices that the EntryList is populated, so it
+      // reacquires the lock and then finds itself on the EntryList.
+      // Given all that, we have to tolerate the circumstance where "w" is
+      // associated with Self.
+      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    // If we find that both _cxq and EntryList are null then just
+    // re-run the exit protocol from the top.
+    w = _cxq;
+    if (w == NULL) continue;
+
+    // Drain _cxq into EntryList - bulk transfer.
+    // First, detach _cxq.
+    // The following loop is tantamount to: w = swap (&cxq, NULL)
+    for (;;) {
+      assert(w != NULL, "Invariant");
+      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+      if (u == w) break;
+      w = u;
+    }
+    TEVENT(Inflated exit - drain cxq into EntryList);
+
+    assert(w != NULL              , "invariant");
+    assert(_EntryList  == NULL    , "invariant");
 
-      w = _EntryList;
-      if (w != NULL) {
-          guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
+    // Convert the LIFO SLL anchored by _cxq into a DLL.
+    // The list reorganization step operates in O(LENGTH(w)) time.
+    // It's critical that this step operate quickly as
+    // "Self" still holds the outer-lock, restricting parallelism
+    // and effectively lengthening the critical section.
+    // Invariant: s chases t chases u.
+    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+    // we have faster access to the tail.
+
+    if (QMode == 1) {
+      // QMode == 1 : drain cxq to EntryList, reversing the order
+      // of the list as we go.
+      ObjectWaiter * s = NULL;
+      ObjectWaiter * t = w;
+      ObjectWaiter * u = NULL;
+      while (t != NULL) {
+        guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
+        t->TState = ObjectWaiter::TS_ENTER;
+        u = t->_next;
+        t->_prev = u;
+        t->_next = s;
+        s = t;
+        t = u;
       }
-   }
+      _EntryList  = s;
+      assert(s != NULL, "invariant");
+    } else {
+      // QMode == 0 or QMode == 2
+      _EntryList = w;
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
+      }
+    }
+
+    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+    // See if we can abdicate to a spinner instead of waking a thread.
+    // A primary goal of the implementation is to reduce the
+    // context-switch rate.
+    if (_succ != NULL) continue;
+
+    w = _EntryList;
+    if (w != NULL) {
+      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+  }
 }
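For readers outside the HotSpot tree, the cxq-drain pattern used above -- detach the whole LIFO push-list with one atomic swap, then thread back-pointers through it -- can be illustrated with a small standalone sketch. The Node type, head variable, push() and detach_and_link() names below are invented for this example and use std::atomic rather than Atomic::cmpxchg_ptr; it approximates the idea, it is not the ObjectMonitor code.

#include <atomic>
#include <cstddef>

struct Node {
  Node* _next;
  Node* _prev;
};

static std::atomic<Node*> head{nullptr};   // analogue of _cxq

// A contending thread pushes itself onto the LIFO list (analogue of enqueueing on _cxq).
void push(Node* n) {
  Node* h = head.load(std::memory_order_relaxed);
  do {
    n->_next = h;
  } while (!head.compare_exchange_weak(h, n,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}

// Detach the whole list in one atomic swap -- the moral equivalent of
// w = swap(&cxq, NULL) -- then thread _prev pointers through it in O(n),
// just as the QMode paths above convert the SLL into a DLL.
Node* detach_and_link() {
  Node* w = head.exchange(nullptr, std::memory_order_acquire);
  Node* q = nullptr;
  for (Node* p = w; p != nullptr; p = p->_next) {
    p->_prev = q;
    q = p;
  }
  return w;    // head of the resulting doubly linked list (its _prev is NULL)
}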
 
 // ExitSuspendEquivalent:
@@ -1278,52 +1278,52 @@
 
 
 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-   const int Mode = Knob_FastHSSEC;
-   if (Mode && !jSelf->is_external_suspend()) {
-      assert(jSelf->is_suspend_equivalent(), "invariant");
-      jSelf->clear_suspend_equivalent();
-      if (2 == Mode) OrderAccess::storeload();
-      if (!jSelf->is_external_suspend()) return false;
-      // We raced a suspension -- fall thru into the slow path
-      TEVENT(ExitSuspendEquivalent - raced);
-      jSelf->set_suspend_equivalent();
-   }
-   return jSelf->handle_special_suspend_equivalent_condition();
+  const int Mode = Knob_FastHSSEC;
+  if (Mode && !jSelf->is_external_suspend()) {
+    assert(jSelf->is_suspend_equivalent(), "invariant");
+    jSelf->clear_suspend_equivalent();
+    if (2 == Mode) OrderAccess::storeload();
+    if (!jSelf->is_external_suspend()) return false;
+    // We raced a suspension -- fall thru into the slow path
+    TEVENT(ExitSuspendEquivalent - raced);
+    jSelf->set_suspend_equivalent();
+  }
+  return jSelf->handle_special_suspend_equivalent_condition();
 }
 
 
 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
-   assert(_owner == Self, "invariant");
+  assert(_owner == Self, "invariant");
 
-   // Exit protocol:
-   // 1. ST _succ = wakee
-   // 2. membar #loadstore|#storestore;
-   // 2. ST _owner = NULL
-   // 3. unpark(wakee)
+  // Exit protocol:
+  // 1. ST _succ = wakee
+  // 2. membar #loadstore|#storestore;
+  // 2. ST _owner = NULL
+  // 3. unpark(wakee)
 
-   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
-   ParkEvent * Trigger = Wakee->_event;
+  _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
+  ParkEvent * Trigger = Wakee->_event;
 
-   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
-   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
-   // out-of-scope (non-extant).
-   Wakee  = NULL;
+  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+  // out-of-scope (non-extant).
+  Wakee  = NULL;
 
-   // Drop the lock
-   OrderAccess::release_store_ptr(&_owner, NULL);
-   OrderAccess::fence();                               // ST _owner vs LD in unpark()
+  // Drop the lock
+  OrderAccess::release_store_ptr(&_owner, NULL);
+  OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
-   if (SafepointSynchronize::do_call_back()) {
-      TEVENT(unpark before SAFEPOINT);
-   }
+  if (SafepointSynchronize::do_call_back()) {
+    TEVENT(unpark before SAFEPOINT);
+  }
 
-   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
-   Trigger->unpark();
+  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+  Trigger->unpark();
 
-   // Maintain stats and report events to JVMTI
-   if (ObjectMonitor::_sync_Parks != NULL) {
-      ObjectMonitor::_sync_Parks->inc();
-   }
+  // Maintain stats and report events to JVMTI
+  if (ObjectMonitor::_sync_Parks != NULL) {
+    ObjectMonitor::_sync_Parks->inc();
+  }
 }
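A minimal standalone sketch of the three-step exit ordering noted above -- ST _succ; release-store _owner = NULL; fence; unpark the wakee -- assuming a condition-variable-based Event as a stand-in for ParkEvent. The owner/succ globals and the exit_epilog() name are hypothetical; this shows the ordering idea, not the HotSpot implementation.

#include <atomic>
#include <condition_variable>
#include <mutex>

struct Event {                       // minimal park/unpark analogue
  std::mutex m;
  std::condition_variable cv;
  bool signaled = false;
  void unpark() {
    std::lock_guard<std::mutex> g(m);
    signaled = true;
    cv.notify_one();
  }
  void park() {
    std::unique_lock<std::mutex> g(m);
    cv.wait(g, [this] { return signaled; });
    signaled = false;
  }
};

std::atomic<void*> owner{nullptr};   // stand-in for _owner
std::atomic<void*> succ{nullptr};    // stand-in for _succ

void exit_epilog(void* wakee_thread, Event* trigger) {
  succ.store(wakee_thread, std::memory_order_relaxed);   // 1. ST _succ = wakee
  owner.store(nullptr, std::memory_order_release);       // 2. drop the lock (release store)
  std::atomic_thread_fence(std::memory_order_seq_cst);   //    ST _owner vs LD in unpark()
  trigger->unpark();                                     // 3. wake the chosen successor
}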
 
 
@@ -1337,41 +1337,41 @@
 // inflated monitor, e.g. the monitor can be inflated by a non-owning
 // thread due to contention.
 intptr_t ObjectMonitor::complete_exit(TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   if (THREAD != _owner) {
+  if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;   /* Convert from basiclock addr to Thread addr */
-       _recursions = 0;
-       OwnerIsThread = 1;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;   /* Convert from basiclock addr to Thread addr */
+      _recursions = 0;
+      OwnerIsThread = 1;
     }
-   }
+  }
 
-   guarantee(Self == _owner, "complete_exit not owner");
-   intptr_t save = _recursions; // record the old recursion count
-   _recursions = 0;        // set the recursion level to be 0
-   exit(true, Self);           // exit the monitor
-   guarantee(_owner != Self, "invariant");
-   return save;
+  guarantee(Self == _owner, "complete_exit not owner");
+  intptr_t save = _recursions; // record the old recursion count
+  _recursions = 0;        // set the recursion level to be 0
+  exit(true, Self);           // exit the monitor
+  guarantee(_owner != Self, "invariant");
+  return save;
 }
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   guarantee(_owner != Self, "reenter already owner");
-   enter(THREAD);       // enter the monitor
-   guarantee(_recursions == 0, "reenter recursion");
-   _recursions = recursions;
-   return;
+  guarantee(_owner != Self, "reenter already owner");
+  enter(THREAD);       // enter the monitor
+  guarantee(_recursions == 0, "reenter recursion");
+  _recursions = recursions;
+  return;
 }
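The complete_exit()/reenter() pair above amounts to "fully release the lock, remember how deeply it was held, then later reacquire it to the same depth". A sketch of that idiom over std::recursive_mutex, with a made-up RecursiveLock type whose depth counter only approximates _recursions:

#include <mutex>

class RecursiveLock {                // made-up stand-in, not ObjectMonitor
  std::recursive_mutex _m;
  int _depth = 0;                    // how many times the owner holds the lock
 public:
  void enter() { _m.lock(); ++_depth; }
  void exit()  { --_depth; _m.unlock(); }

  // Fully release the lock, however deeply it is held, and report the depth.
  int complete_exit() {
    int save = _depth;
    while (_depth > 0) exit();
    return save;
  }

  // Reacquire and restore the saved depth (saved_depth >= 1, since the caller
  // owned the lock when it called complete_exit()).
  void reenter(int saved_depth) {
    enter();
    while (_depth < saved_depth) enter();
  }
};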
 
 
@@ -1412,9 +1412,9 @@
 
 // helper method for posting a monitor wait event
 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
-                                                           jlong notifier_tid,
-                                                           jlong timeout,
-                                                           bool timedout) {
+                                            jlong notifier_tid,
+                                            jlong timeout,
+                                            bool timedout) {
   event->set_klass(((oop)this->object())->klass());
   event->set_timeout((TYPE_ULONG)timeout);
   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
@@ -1429,232 +1429,232 @@
 // Note: a subset of changes to ObjectMonitor::wait()
 // will need to be replicated in complete_exit
 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   // Throw IMSX or IEX.
-   CHECK_OWNER();
+  // Throw IMSX or IEX.
+  CHECK_OWNER();
 
-   EventJavaMonitorWait event;
+  EventJavaMonitorWait event;
 
-   // check for a pending interrupt
-   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-     // post monitor waited event.  Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-        // Note: 'false' parameter is passed here because the
-        // wait was not timed out due to thread interrupt.
-        JvmtiExport::post_monitor_waited(jt, this, false);
+  // check for a pending interrupt
+  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+    // post monitor waited event.  Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      // Note: 'false' parameter is passed here because the
+      // wait was not timed out due to thread interrupt.
+      JvmtiExport::post_monitor_waited(jt, this, false);
 
-        // In this short circuit of the monitor wait protocol, the
-        // current thread never drops ownership of the monitor and
-        // never gets added to the wait queue so the current thread
-        // cannot be made the successor. This means that the
-        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
-        // consume an unpark() meant for the ParkEvent associated with
-        // this ObjectMonitor.
-     }
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, 0, millis, false);
-     }
-     TEVENT(Wait - Throw IEX);
-     THROW(vmSymbols::java_lang_InterruptedException());
-     return;
-   }
+      // In this short circuit of the monitor wait protocol, the
+      // current thread never drops ownership of the monitor and
+      // never gets added to the wait queue so the current thread
+      // cannot be made the successor. This means that the
+      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
+      // consume an unpark() meant for the ParkEvent associated with
+      // this ObjectMonitor.
+    }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, 0, millis, false);
+    }
+    TEVENT(Wait - Throw IEX);
+    THROW(vmSymbols::java_lang_InterruptedException());
+    return;
+  }
 
-   TEVENT(Wait);
+  TEVENT(Wait);
 
-   assert(Self->_Stalled == 0, "invariant");
-   Self->_Stalled = intptr_t(this);
-   jt->set_current_waiting_monitor(this);
+  assert(Self->_Stalled == 0, "invariant");
+  Self->_Stalled = intptr_t(this);
+  jt->set_current_waiting_monitor(this);
 
-   // create a node to be put into the queue
-   // Critically, after we reset() the event but prior to park(), we must check
-   // for a pending interrupt.
-   ObjectWaiter node(Self);
-   node.TState = ObjectWaiter::TS_WAIT;
-   Self->_ParkEvent->reset();
-   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
+  // create a node to be put into the queue
+  // Critically, after we reset() the event but prior to park(), we must check
+  // for a pending interrupt.
+  ObjectWaiter node(Self);
+  node.TState = ObjectWaiter::TS_WAIT;
+  Self->_ParkEvent->reset();
+  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
 
-   // Enter the waiting queue, which is a circular doubly linked list in this case
-   // but it could be a priority queue or any data structure.
-   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
-   // by the the owner of the monitor *except* in the case where park()
-   // returns because of a timeout of interrupt.  Contention is exceptionally rare
-   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+  // Enter the waiting queue, which is a circular doubly linked list in this case
+  // but it could be a priority queue or any data structure.
+  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+  // by the owner of the monitor *except* in the case where park()
+  // returns because of a timeout or interrupt.  Contention is exceptionally rare
+  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
 
-   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
-   AddWaiter(&node);
-   Thread::SpinRelease(&_WaitSetLock);
+  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
+  AddWaiter(&node);
+  Thread::SpinRelease(&_WaitSetLock);
 
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
-   intptr_t save = _recursions; // record the old recursion count
-   _waiters++;                  // increment the number of waiters
-   _recursions = 0;             // set the recursion level to be 1
-   exit(true, Self);                    // exit the monitor
-   guarantee(_owner != Self, "invariant");
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
+  intptr_t save = _recursions; // record the old recursion count
+  _waiters++;                  // increment the number of waiters
+  _recursions = 0;             // set the recursion level to be 0
+  exit(true, Self);                    // exit the monitor
+  guarantee(_owner != Self, "invariant");
 
-   // The thread is on the WaitSet list - now park() it.
-   // On MP systems it's conceivable that a brief spin before we park
-   // could be profitable.
-   //
-   // TODO-FIXME: change the following logic to a loop of the form
-   //   while (!timeout && !interrupted && _notified == 0) park()
+  // The thread is on the WaitSet list - now park() it.
+  // On MP systems it's conceivable that a brief spin before we park
+  // could be profitable.
+  //
+  // TODO-FIXME: change the following logic to a loop of the form
+  //   while (!timeout && !interrupted && _notified == 0) park()
 
-   int ret = OS_OK;
-   int WasNotified = 0;
-   { // State transition wrappers
-     OSThread* osthread = Self->osthread();
-     OSThreadWaitState osts(osthread, true);
-     {
-       ThreadBlockInVM tbivm(jt);
-       // Thread is in thread_blocked state and oop access is unsafe.
-       jt->set_suspend_equivalent();
+  int ret = OS_OK;
+  int WasNotified = 0;
+  { // State transition wrappers
+    OSThread* osthread = Self->osthread();
+    OSThreadWaitState osts(osthread, true);
+    {
+      ThreadBlockInVM tbivm(jt);
+      // Thread is in thread_blocked state and oop access is unsafe.
+      jt->set_suspend_equivalent();
 
-       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
-           // Intentionally empty
-       } else
-       if (node._notified == 0) {
-         if (millis <= 0) {
-            Self->_ParkEvent->park();
-         } else {
-            ret = Self->_ParkEvent->park(millis);
-         }
-       }
+      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+        // Intentionally empty
+      } else
+      if (node._notified == 0) {
+        if (millis <= 0) {
+          Self->_ParkEvent->park();
+        } else {
+          ret = Self->_ParkEvent->park(millis);
+        }
+      }
 
-       // were we externally suspended while we were waiting?
-       if (ExitSuspendEquivalent (jt)) {
-          // TODO-FIXME: add -- if succ == Self then succ = null.
-          jt->java_suspend_self();
-       }
+      // were we externally suspended while we were waiting?
+      if (ExitSuspendEquivalent (jt)) {
+        // TODO-FIXME: add -- if succ == Self then succ = null.
+        jt->java_suspend_self();
+      }
 
-     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
 
 
-     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
-     // from the WaitSet to the EntryList.
-     // See if we need to remove Node from the WaitSet.
-     // We use double-checked locking to avoid grabbing _WaitSetLock
-     // if the thread is not on the wait queue.
-     //
-     // Note that we don't need a fence before the fetch of TState.
-     // In the worst case we'll fetch a old-stale value of TS_WAIT previously
-     // written by the is thread. (perhaps the fetch might even be satisfied
-     // by a look-aside into the processor's own store buffer, although given
-     // the length of the code path between the prior ST and this load that's
-     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
-     // then we'll acquire the lock and then re-fetch a fresh TState value.
-     // That is, we fail toward safety.
+    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+    // from the WaitSet to the EntryList.
+    // See if we need to remove Node from the WaitSet.
+    // We use double-checked locking to avoid grabbing _WaitSetLock
+    // if the thread is not on the wait queue.
+    //
+    // Note that we don't need a fence before the fetch of TState.
+    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
+    // written by this thread. (perhaps the fetch might even be satisfied
+    // by a look-aside into the processor's own store buffer, although given
+    // the length of the code path between the prior ST and this load that's
+    // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+    // then we'll acquire the lock and then re-fetch a fresh TState value.
+    // That is, we fail toward safety.
 
-     if (node.TState == ObjectWaiter::TS_WAIT) {
-         Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
-         if (node.TState == ObjectWaiter::TS_WAIT) {
-            DequeueSpecificWaiter(&node);       // unlink from WaitSet
-            assert(node._notified == 0, "invariant");
-            node.TState = ObjectWaiter::TS_RUN;
-         }
-         Thread::SpinRelease(&_WaitSetLock);
-     }
+    if (node.TState == ObjectWaiter::TS_WAIT) {
+      Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
+      if (node.TState == ObjectWaiter::TS_WAIT) {
+        DequeueSpecificWaiter(&node);       // unlink from WaitSet
+        assert(node._notified == 0, "invariant");
+        node.TState = ObjectWaiter::TS_RUN;
+      }
+      Thread::SpinRelease(&_WaitSetLock);
+    }
 
-     // The thread is now either on off-list (TS_RUN),
-     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
-     // The Node's TState variable is stable from the perspective of this thread.
-     // No other threads will asynchronously modify TState.
-     guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
-     OrderAccess::loadload();
-     if (_succ == Self) _succ = NULL;
-     WasNotified = node._notified;
+    // The thread is now either on off-list (TS_RUN),
+    // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+    // The Node's TState variable is stable from the perspective of this thread.
+    // No other threads will asynchronously modify TState.
+    guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
+    OrderAccess::loadload();
+    if (_succ == Self) _succ = NULL;
+    WasNotified = node._notified;
 
-     // Reentry phase -- reacquire the monitor.
-     // re-enter contended monitor after object.wait().
-     // retain OBJECT_WAIT state until re-enter successfully completes
-     // Thread state is thread_in_vm and oop access is again safe,
-     // although the raw address of the object may have changed.
-     // (Don't cache naked oops over safepoints, of course).
+    // Reentry phase -- reacquire the monitor.
+    // re-enter contended monitor after object.wait().
+    // retain OBJECT_WAIT state until re-enter successfully completes
+    // Thread state is thread_in_vm and oop access is again safe,
+    // although the raw address of the object may have changed.
+    // (Don't cache naked oops over safepoints, of course).
 
-     // post monitor waited event. Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+    // post monitor waited event. Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 
-       if (node._notified != 0 && _succ == Self) {
-         // In this part of the monitor wait-notify-reenter protocol it
-         // is possible (and normal) for another thread to do a fastpath
-         // monitor enter-exit while this thread is still trying to get
-         // to the reenter portion of the protocol.
-         //
-         // The ObjectMonitor was notified and the current thread is
-         // the successor which also means that an unpark() has already
-         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
-         // consume the unpark() that was done when the successor was
-         // set because the same ParkEvent is shared between Java
-         // monitors and JVM/TI RawMonitors (for now).
-         //
-         // We redo the unpark() to ensure forward progress, i.e., we
-         // don't want all pending threads hanging (parked) with none
-         // entering the unlocked monitor.
-         node._event->unpark();
-       }
-     }
+      if (node._notified != 0 && _succ == Self) {
+        // In this part of the monitor wait-notify-reenter protocol it
+        // is possible (and normal) for another thread to do a fastpath
+        // monitor enter-exit while this thread is still trying to get
+        // to the reenter portion of the protocol.
+        //
+        // The ObjectMonitor was notified and the current thread is
+        // the successor which also means that an unpark() has already
+        // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+        // consume the unpark() that was done when the successor was
+        // set because the same ParkEvent is shared between Java
+        // monitors and JVM/TI RawMonitors (for now).
+        //
+        // We redo the unpark() to ensure forward progress, i.e., we
+        // don't want all pending threads hanging (parked) with none
+        // entering the unlocked monitor.
+        node._event->unpark();
+      }
+    }
 
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
-     }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+    }
 
-     OrderAccess::fence();
+    OrderAccess::fence();
 
-     assert(Self->_Stalled != 0, "invariant");
-     Self->_Stalled = 0;
+    assert(Self->_Stalled != 0, "invariant");
+    Self->_Stalled = 0;
 
-     assert(_owner != Self, "invariant");
-     ObjectWaiter::TStates v = node.TState;
-     if (v == ObjectWaiter::TS_RUN) {
-         enter(Self);
-     } else {
-         guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-         ReenterI(Self, &node);
-         node.wait_reenter_end(this);
-     }
+    assert(_owner != Self, "invariant");
+    ObjectWaiter::TStates v = node.TState;
+    if (v == ObjectWaiter::TS_RUN) {
+      enter(Self);
+    } else {
+      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+      ReenterI(Self, &node);
+      node.wait_reenter_end(this);
+    }
 
-     // Self has reacquired the lock.
-     // Lifecycle - the node representing Self must not appear on any queues.
-     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
-     // want residual elements associated with this thread left on any lists.
-     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
-     assert(_owner == Self, "invariant");
-     assert(_succ != Self , "invariant");
-   } // OSThreadWaitState()
+    // Self has reacquired the lock.
+    // Lifecycle - the node representing Self must not appear on any queues.
+    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+    // want residual elements associated with this thread left on any lists.
+    guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
+    assert(_owner == Self, "invariant");
+    assert(_succ != Self , "invariant");
+  } // OSThreadWaitState()
 
-   jt->set_current_waiting_monitor(NULL);
+  jt->set_current_waiting_monitor(NULL);
 
-   guarantee(_recursions == 0, "invariant");
-   _recursions = save;     // restore the old recursion count
-   _waiters--;             // decrement the number of waiters
+  guarantee(_recursions == 0, "invariant");
+  _recursions = save;     // restore the old recursion count
+  _waiters--;             // decrement the number of waiters
 
-   // Verify a few postconditions
-   assert(_owner == Self       , "invariant");
-   assert(_succ  != Self       , "invariant");
-   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  // Verify a few postconditions
+  assert(_owner == Self       , "invariant");
+  assert(_succ  != Self       , "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 
-   if (SyncFlags & 32) {
-      OrderAccess::fence();
-   }
+  if (SyncFlags & 32) {
+    OrderAccess::fence();
+  }
 
-   // check if the notification happened
-   if (!WasNotified) {
-     // no, it could be timeout or Thread.interrupt() or both
-     // check for interrupt event, otherwise it is timeout
-     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-       TEVENT(Wait - throw IEX from epilog);
-       THROW(vmSymbols::java_lang_InterruptedException());
-     }
-   }
+  // check if the notification happened
+  if (!WasNotified) {
+    // no, it could be timeout or Thread.interrupt() or both
+    // check for interrupt event, otherwise it is timeout
+    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+      TEVENT(Wait - throw IEX from epilog);
+      THROW(vmSymbols::java_lang_InterruptedException());
+    }
+  }
 
-   // NOTE: Spurious wake up will be consider as timeout.
-   // Monitor notify has precedence over thread interrupt.
+  // NOTE: A spurious wakeup will be considered a timeout.
+  // Monitor notify has precedence over thread interrupt.
 }
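The overall shape of wait() -- save the recursion count, drop the monitor, block until notified or timed out, reacquire, restore the count, and only then decide whether the wakeup was a notification -- can be sketched with standard C++ primitives. The Monitor class and its fields below are invented for the illustration and deliberately gloss over the WaitSet/cxq/EntryList machinery:

#include <chrono>
#include <condition_variable>
#include <mutex>

class Monitor {                      // invented illustration, not ObjectMonitor
  std::mutex _m;
  std::condition_variable _cv;
  int _recursions = 0;
  unsigned long _notify_epoch = 0;   // bumped by notify_all()
 public:
  void enter() { _m.lock(); }
  void exit()  { _m.unlock(); }

  // Caller owns the monitor.  Returns true if notified, false on timeout;
  // spurious wakeups simply go back to waiting.
  bool wait(std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lk(_m, std::adopt_lock);  // we already own _m
    int save = _recursions;                                // record the old recursion count
    _recursions = 0;
    unsigned long epoch = _notify_epoch;
    bool notified = _cv.wait_for(lk, timeout,
                                 [&] { return _notify_epoch != epoch; });
    _recursions = save;                                    // restore after reacquiring
    lk.release();                                          // keep the lock held for the caller
    return notified;
  }

  void notify_all() {                // caller owns the monitor
    ++_notify_epoch;
    _cv.notify_all();
  }
};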
 
 
@@ -1666,8 +1666,8 @@
 void ObjectMonitor::notify(TRAPS) {
   CHECK_OWNER();
   if (_WaitSet == NULL) {
-     TEVENT(Empty-Notify);
-     return;
+    TEVENT(Empty-Notify);
+    return;
   }
   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
 
@@ -1676,108 +1676,108 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
   ObjectWaiter * iterator = DequeueWaiter();
   if (iterator != NULL) {
-     TEVENT(Notify1 - Transfer);
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
+    TEVENT(Notify1 - Transfer);
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
 
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
-        }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
-        }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            iterator->TState = ObjectWaiter::TS_CXQ;
-            for (;;) {
-                ObjectWaiter * Front = _cxq;
-                iterator->_next = Front;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                    break;
-                }
-            }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else
+    if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 2) {      // prepend to cxq
+      // prepend to cxq
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
         iterator->TState = ObjectWaiter::TS_CXQ;
         for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
+          ObjectWaiter * Front = _cxq;
+          iterator->_next = Front;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+            break;
+          }
         }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else
+    if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
+        }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of  wait() timeouts and interrupts the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc();
+    ObjectMonitor::_sync_Notifications->inc();
   }
 }
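The CONSIDER note in the Policy == 1 branch asks for constant-time tail access. One way to get it, sketched below with a hypothetical Waiter type, is the circular DLL the comment mentions: the head's _prev always points at the tail, so append needs no linear walk.

#include <cstddef>

struct Waiter {                      // hypothetical node type for this example
  Waiter* _next;
  Waiter* _prev;
};

// Append n to the circular DLL rooted at *head in O(1): the head's _prev is
// always the tail, so no walk to the end is required.
void cdll_append(Waiter** head, Waiter* n) {
  if (*head == NULL) {
    n->_next = n->_prev = n;         // single-element ring
    *head = n;
    return;
  }
  Waiter* tail = (*head)->_prev;
  tail->_next = n;
  n->_prev = tail;
  n->_next = *head;
  (*head)->_prev = n;
}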
 
@@ -1786,8 +1786,8 @@
   CHECK_OWNER();
   ObjectWaiter* iterator;
   if (_WaitSet == NULL) {
-      TEVENT(Empty-NotifyAll);
-      return;
+    TEVENT(Empty-NotifyAll);
+    return;
   }
   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
 
@@ -1796,112 +1796,112 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notifyall");
 
   for (;;) {
-     iterator = DequeueWaiter();
-     if (iterator == NULL) break;
-     TEVENT(NotifyAll - Transfer1);
-     ++Tally;
+    iterator = DequeueWaiter();
+    if (iterator == NULL) break;
+    TEVENT(NotifyAll - Transfer1);
+    ++Tally;
+
+    // Disposition - what might we do with iterator ?
+    // a.  add it directly to the EntryList - either tail or head.
+    // b.  push it onto the front of the _cxq.
+    // For now we use (a).
 
-     // Disposition - what might we do with iterator ?
-     // a.  add it directly to the EntryList - either tail or head.
-     // b.  push it onto the front of the _cxq.
-     // For now we use (a).
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
-
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else
+    if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 2) {      // prepend to cxq
+      // prepend to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Front = _cxq;
+        iterator->_next = Front;
+        if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+          break;
         }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
         }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         iterator->TState = ObjectWaiter::TS_CXQ;
-         for (;;) {
-             ObjectWaiter * Front = _cxq;
-             iterator->_next = Front;
-             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                 break;
-             }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
-        iterator->TState = ObjectWaiter::TS_CXQ;
-        for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
-        }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of  wait() timeouts and interrupts the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc(Tally);
+    ObjectMonitor::_sync_Notifications->inc(Tally);
   }
 }
 
@@ -1979,227 +1979,227 @@
 
 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
 
-    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
-    int ctr = Knob_FixedSpin;
-    if (ctr != 0) {
-        while (--ctr >= 0) {
-            if (TryLock(Self) > 0) return 1;
-            SpinPause();
-        }
-        return 0;
+  // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
+  int ctr = Knob_FixedSpin;
+  if (ctr != 0) {
+    while (--ctr >= 0) {
+      if (TryLock(Self) > 0) return 1;
+      SpinPause();
+    }
+    return 0;
+  }
+
+  for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
+    if (TryLock(Self) > 0) {
+      // Increase _SpinDuration ...
+      // Note that we don't clamp SpinDuration precisely at SpinLimit.
+      // Raising _SpinDuration to the poverty line is key.
+      int x = _SpinDuration;
+      if (x < Knob_SpinLimit) {
+        if (x < Knob_Poverty) x = Knob_Poverty;
+        _SpinDuration = x + Knob_BonusB;
+      }
+      return 1;
+    }
+    SpinPause();
+  }
+
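The duration bookkeeping above -- raise _SpinDuration toward the limit after a profitable spin, via the poverty floor and a bonus, and decay it after an unprofitable one -- is easiest to see in isolation. A standalone sketch with placeholder knob values (not necessarily the HotSpot defaults):

static const int kSpinLimit = 5000;  // placeholder knobs for the illustration
static const int kPoverty   = 1000;
static const int kBonus     = 100;
static const int kPenalty   = 200;

static int spin_duration = kPoverty;

void spin_succeeded() {              // a profitable spin: lengthen future attempts
  int x = spin_duration;
  if (x < kSpinLimit) {
    if (x < kPoverty) x = kPoverty;  // raise to the poverty line first
    spin_duration = x + kBonus;      // then add the bonus; no precise clamp at the limit
  }
}

void spin_failed() {                 // an unprofitable spin: shorten future attempts
  int x = spin_duration;
  if (x > 0) {
    x -= kPenalty;
    if (x < 0) x = 0;
    spin_duration = x;
  }
}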
+  // Admission control - verify preconditions for spinning
+  //
+  // We always spin a little bit, just to prevent _SpinDuration == 0 from
+  // becoming an absorbing state.  Put another way, we spin briefly to
+  // sample, just in case the system load, parallelism, contention, or lock
+  // modality changed.
+  //
+  // Consider the following alternative:
+  // Periodically set _SpinDuration = _SpinLimit and try a long/full
+  // spin attempt.  "Periodically" might mean after a tally of
+  // the # of failed spin attempts (or iterations) reaches some threshold.
+  // This takes us into the realm of 1-out-of-N spinning, where we
+  // hold the duration constant but vary the frequency.
+
+  ctr = _SpinDuration;
+  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
+  if (ctr <= 0) return 0;
+
+  if (Knob_SuccRestrict && _succ != NULL) return 0;
+  if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
+    TEVENT(Spin abort - notrunnable [TOP]);
+    return 0;
+  }
+
+  int MaxSpin = Knob_MaxSpinners;
+  if (MaxSpin >= 0) {
+    if (_Spinner > MaxSpin) {
+      TEVENT(Spin abort -- too many spinners);
+      return 0;
+    }
+    // Slightly racy, but benign ...
+    Adjust(&_Spinner, 1);
+  }
+
+  // We're good to spin ... spin ingress.
+  // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+  // when preparing to LD...CAS _owner, etc and the CAS is likely
+  // to succeed.
+  int hits    = 0;
+  int msk     = 0;
+  int caspty  = Knob_CASPenalty;
+  int oxpty   = Knob_OXPenalty;
+  int sss     = Knob_SpinSetSucc;
+  if (sss && _succ == NULL) _succ = Self;
+  Thread * prv = NULL;
+
+  // There are three ways to exit the following loop:
+  // 1.  A successful spin where this thread has acquired the lock.
+  // 2.  Spin failure with prejudice
+  // 3.  Spin failure without prejudice
+
+  while (--ctr >= 0) {
+
+    // Periodic polling -- Check for pending GC
+    // Threads may spin while they're unsafe.
+    // We don't want spinning threads to delay the JVM from reaching
+    // a stop-the-world safepoint or to steal cycles from GC.
+    // If we detect a pending safepoint we abort in order that
+    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+    // this thread, if safe, doesn't steal cycles from GC.
+    // This is in keeping with the "no loitering in runtime" rule.
+    // We periodically check to see if there's a safepoint pending.
+    if ((ctr & 0xFF) == 0) {
+      if (SafepointSynchronize::do_call_back()) {
+        TEVENT(Spin: safepoint);
+        goto Abort;           // abrupt spin egress
+      }
+      if (Knob_UsePause & 1) SpinPause();
+
+      int (*scb)(intptr_t,int) = SpinCallbackFunction;
+      if (hits > 50 && scb != NULL) {
+        int abend = (*scb)(SpinCallbackArgument, 0);
+      }
     }
 
-    for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
-      if (TryLock(Self) > 0) {
-        // Increase _SpinDuration ...
+    if (Knob_UsePause & 2) SpinPause();
+
+    // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
+    // This is useful on classic SMP systems, but is of less utility on
+    // N1-style CMT platforms.
+    //
+    // Trade-off: lock acquisition latency vs coherency bandwidth.
+    // Lock hold times are typically short.  A histogram
+    // of successful spin attempts shows that we usually acquire
+    // the lock early in the spin.  That suggests we want to
+    // sample _owner frequently in the early phase of the spin,
+    // but then back-off and sample less frequently as the spin
+    // progresses.  The back-off makes us a good citizen on big
+    // SMP systems.  Oversampling _owner can consume excessive
+    // coherency bandwidth.  Relatedly, if we oversample _owner we
+    // can inadvertently interfere with the ST m->owner=null
+    // executed by the lock owner.
+    if (ctr & msk) continue;
+    ++hits;
+    if ((hits & 0xF) == 0) {
+      // The 0xF, above, corresponds to the exponent.
+      // Consider: (msk+1)|msk
+      msk = ((msk << 2)|3) & BackOffMask;
+    }
+
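To see how the mask above thins out the _owner probes, here is a standalone toy loop (kBackOffMask and the iteration count are arbitrary for the demo): every 16 probes the window widens 0 -> 0x3 -> 0xF -> 0x3F -> ..., so a probe fires once per 1, 4, 16, 64, ... iterations.

#include <cstdio>

int main() {
  const int kBackOffMask = 0xFFF;          // placeholder clamp on the window
  int msk  = 0;
  int hits = 0;
  for (int ctr = 100000; --ctr >= 0;) {
    if (ctr & msk) continue;               // stay off the bus between probes
    ++hits;                                // this iteration would probe _owner
    if ((hits & 0xF) == 0) {
      msk = ((msk << 2) | 3) & kBackOffMask;   // widen the back-off window
      std::printf("hits=%d  msk=0x%X  (probe once per %d iterations)\n",
                  hits, (unsigned) msk, msk + 1);
    }
  }
  return 0;
}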
+    // Probe _owner with TATAS
+    // If this thread observes the monitor transition or flicker
+    // from locked to unlocked to locked, then the odds that this
+    // thread will acquire the lock in this spin attempt go down
+    // considerably.  The same argument applies if the CAS fails
+    // or if we observe _owner change from one non-null value to
+    // another non-null value.   In such cases we might abort
+    // the spin without prejudice or apply a "penalty" to the
+    // spin count-down variable "ctr", reducing it by 100, say.
+
+    Thread * ox = (Thread *) _owner;
+    if (ox == NULL) {
+      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+      if (ox == NULL) {
+        // The CAS succeeded -- this thread acquired ownership
+        // Take care of some bookkeeping to exit spin state.
+        if (sss && _succ == Self) {
+          _succ = NULL;
+        }
+        if (MaxSpin > 0) Adjust(&_Spinner, -1);
+
+        // Increase _SpinDuration :
+        // The spin was successful (profitable) so we tend toward
+        // longer spin attempts in the future.
+        // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+        // If we acquired the lock early in the spin cycle it
+        // makes sense to increase _SpinDuration proportionally.
         // Note that we don't clamp SpinDuration precisely at SpinLimit.
-        // Raising _SpurDuration to the poverty line is key.
         int x = _SpinDuration;
         if (x < Knob_SpinLimit) {
-           if (x < Knob_Poverty) x = Knob_Poverty;
-           _SpinDuration = x + Knob_BonusB;
+          if (x < Knob_Poverty) x = Knob_Poverty;
+          _SpinDuration = x + Knob_Bonus;
         }
         return 1;
       }
-      SpinPause();
-    }
 
-    // Admission control - verify preconditions for spinning
-    //
-    // We always spin a little bit, just to prevent _SpinDuration == 0 from
-    // becoming an absorbing state.  Put another way, we spin briefly to
-    // sample, just in case the system load, parallelism, contention, or lock
-    // modality changed.
-    //
-    // Consider the following alternative:
-    // Periodically set _SpinDuration = _SpinLimit and try a long/full
-    // spin attempt.  "Periodically" might mean after a tally of
-    // the # of failed spin attempts (or iterations) reaches some threshold.
-    // This takes us into the realm of 1-out-of-N spinning, where we
-    // hold the duration constant but vary the frequency.
-
-    ctr = _SpinDuration;
-    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
-    if (ctr <= 0) return 0;
-
-    if (Knob_SuccRestrict && _succ != NULL) return 0;
-    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
-       TEVENT(Spin abort - notrunnable [TOP]);
-       return 0;
-    }
-
-    int MaxSpin = Knob_MaxSpinners;
-    if (MaxSpin >= 0) {
-       if (_Spinner > MaxSpin) {
-          TEVENT(Spin abort -- too many spinners);
-          return 0;
-       }
-       // Slightly racy, but benign ...
-       Adjust(&_Spinner, 1);
+      // The CAS failed ... we can take any of the following actions:
+      // * penalize: ctr -= Knob_CASPenalty
+      // * exit spin with prejudice -- goto Abort;
+      // * exit spin without prejudice.
+      // * Since CAS is high-latency, retry again immediately.
+      prv = ox;
+      TEVENT(Spin: cas failed);
+      if (caspty == -2) break;
+      if (caspty == -1) goto Abort;
+      ctr -= caspty;
+      continue;
     }
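
The caspty handling above (and the oxpty handling for owner-change events further down) folds three policies into one integer knob: -2 means leave the loop and take the _SpinDuration decay on the failure path, -1 means jump straight to Abort with _SpinDuration untouched, and any non-negative value is simply charged against the remaining spin budget. A hedged standalone rendering of that convention (the enum and helper names are mine, not HotSpot's):

    // Illustrative decoding of a spin-penalty knob (Knob_CASPenalty / Knob_OXPenalty).
    enum SpinDecision {
      SPIN_CONTINUE,  // keep spinning, with the budget reduced by the penalty
      SPIN_BREAK,     // leave the loop and fall through to the _SpinDuration decay
      SPIN_ABORT      // bail out immediately, leaving _SpinDuration untouched
    };

    static SpinDecision apply_penalty(int penaltyKnob, int& ctr) {
      if (penaltyKnob == -2) return SPIN_BREAK;
      if (penaltyKnob == -1) return SPIN_ABORT;
      ctr -= penaltyKnob;     // a non-negative knob is charged to the spin budget
      return SPIN_CONTINUE;
    }
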
 
-    // We're good to spin ... spin ingress.
-    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
-    // when preparing to LD...CAS _owner, etc and the CAS is likely
-    // to succeed.
-    int hits    = 0;
-    int msk     = 0;
-    int caspty  = Knob_CASPenalty;
-    int oxpty   = Knob_OXPenalty;
-    int sss     = Knob_SpinSetSucc;
-    if (sss && _succ == NULL) _succ = Self;
-    Thread * prv = NULL;
-
-    // There are three ways to exit the following loop:
-    // 1.  A successful spin where this thread has acquired the lock.
-    // 2.  Spin failure with prejudice
-    // 3.  Spin failure without prejudice
-
-    while (--ctr >= 0) {
-
-      // Periodic polling -- Check for pending GC
-      // Threads may spin while they're unsafe.
-      // We don't want spinning threads to delay the JVM from reaching
-      // a stop-the-world safepoint or to steal cycles from GC.
-      // If we detect a pending safepoint we abort in order that
-      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
-      // this thread, if safe, doesn't steal cycles from GC.
-      // This is in keeping with the "no loitering in runtime" rule.
-      // We periodically check to see if there's a safepoint pending.
-      if ((ctr & 0xFF) == 0) {
-         if (SafepointSynchronize::do_call_back()) {
-            TEVENT(Spin: safepoint);
-            goto Abort;           // abrupt spin egress
-         }
-         if (Knob_UsePause & 1) SpinPause();
-
-         int (*scb)(intptr_t,int) = SpinCallbackFunction;
-         if (hits > 50 && scb != NULL) {
-            int abend = (*scb)(SpinCallbackArgument, 0);
-         }
-      }
-
-      if (Knob_UsePause & 2) SpinPause();
-
-      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
-      // This is useful on classic SMP systems, but is of less utility on
-      // N1-style CMT platforms.
-      //
-      // Trade-off: lock acquisition latency vs coherency bandwidth.
-      // Lock hold times are typically short.  A histogram
-      // of successful spin attempts shows that we usually acquire
-      // the lock early in the spin.  That suggests we want to
-      // sample _owner frequently in the early phase of the spin,
-      // but then back-off and sample less frequently as the spin
-      // progresses.  The back-off makes a good citizen on SMP big
-      // SMP systems.  Oversampling _owner can consume excessive
-      // coherency bandwidth.  Relatedly, if we _oversample _owner we
-      // can inadvertently interfere with the the ST m->owner=null.
-      // executed by the lock owner.
-      if (ctr & msk) continue;
-      ++hits;
-      if ((hits & 0xF) == 0) {
-        // The 0xF, above, corresponds to the exponent.
-        // Consider: (msk+1)|msk
-        msk = ((msk << 2)|3) & BackOffMask;
-      }
+    // Did lock ownership change hands ?
+    if (ox != prv && prv != NULL) {
+      TEVENT(spin: Owner changed);
+      if (oxpty == -2) break;
+      if (oxpty == -1) goto Abort;
+      ctr -= oxpty;
+    }
+    prv = ox;
 
-      // Probe _owner with TATAS
-      // If this thread observes the monitor transition or flicker
-      // from locked to unlocked to locked, then the odds that this
-      // thread will acquire the lock in this spin attempt go down
-      // considerably.  The same argument applies if the CAS fails
-      // or if we observe _owner change from one non-null value to
-      // another non-null value.   In such cases we might abort
-      // the spin without prejudice or apply a "penalty" to the
-      // spin count-down variable "ctr", reducing it by 100, say.
-
-      Thread * ox = (Thread *) _owner;
-      if (ox == NULL) {
-         ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
-         if (ox == NULL) {
-            // The CAS succeeded -- this thread acquired ownership
-            // Take care of some bookkeeping to exit spin state.
-            if (sss && _succ == Self) {
-               _succ = NULL;
-            }
-            if (MaxSpin > 0) Adjust(&_Spinner, -1);
-
-            // Increase _SpinDuration :
-            // The spin was successful (profitable) so we tend toward
-            // longer spin attempts in the future.
-            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
-            // If we acquired the lock early in the spin cycle it
-            // makes sense to increase _SpinDuration proportionally.
-            // Note that we don't clamp SpinDuration precisely at SpinLimit.
-            int x = _SpinDuration;
-            if (x < Knob_SpinLimit) {
-                if (x < Knob_Poverty) x = Knob_Poverty;
-                _SpinDuration = x + Knob_Bonus;
-            }
-            return 1;
-         }
+    // Abort the spin if the owner is not executing.
+    // The owner must be executing in order to drop the lock.
+    // Spinning while the owner is OFFPROC is idiocy.
+    // Consider: ctr -= RunnablePenalty ;
+    if (Knob_OState && NotRunnable (Self, ox)) {
+      TEVENT(Spin abort - notrunnable);
+      goto Abort;
+    }
+    if (sss && _succ == NULL) _succ = Self;
+  }
 
-         // The CAS failed ... we can take any of the following actions:
-         // * penalize: ctr -= Knob_CASPenalty
-         // * exit spin with prejudice -- goto Abort;
-         // * exit spin without prejudice.
-         // * Since CAS is high-latency, retry again immediately.
-         prv = ox;
-         TEVENT(Spin: cas failed);
-         if (caspty == -2) break;
-         if (caspty == -1) goto Abort;
-         ctr -= caspty;
-         continue;
-      }
-
-      // Did lock ownership change hands ?
-      if (ox != prv && prv != NULL) {
-          TEVENT(spin: Owner changed)
-          if (oxpty == -2) break;
-          if (oxpty == -1) goto Abort;
-          ctr -= oxpty;
-      }
-      prv = ox;
-
-      // Abort the spin if the owner is not executing.
-      // The owner must be executing in order to drop the lock.
-      // Spinning while the owner is OFFPROC is idiocy.
-      // Consider: ctr -= RunnablePenalty ;
-      if (Knob_OState && NotRunnable (Self, ox)) {
-         TEVENT(Spin abort - notrunnable);
-         goto Abort;
-      }
-      if (sss && _succ == NULL) _succ = Self;
-   }
-
-   // Spin failed with prejudice -- reduce _SpinDuration.
-   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
-   // AIMD is globally stable.
-   TEVENT(Spin failure);
-   {
-     int x = _SpinDuration;
-     if (x > 0) {
-        // Consider an AIMD scheme like: x -= (x >> 3) + 100
-        // This is globally sample and tends to damp the response.
-        x -= Knob_Penalty;
-        if (x < 0) x = 0;
-        _SpinDuration = x;
-     }
-   }
+  // Spin failed with prejudice -- reduce _SpinDuration.
+  // TODO: Use an AIMD-like policy to adjust _SpinDuration.
+  // AIMD is globally stable.
+  TEVENT(Spin failure);
+  {
+    int x = _SpinDuration;
+    if (x > 0) {
+      // Consider an AIMD scheme like: x -= (x >> 3) + 100
+      // This is globally stable and tends to damp the response.
+      x -= Knob_Penalty;
+      if (x < 0) x = 0;
+      _SpinDuration = x;
+    }
+  }
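
The failure path above is a purely additive decrease (x -= Knob_Penalty). The TODO suggests an AIMD-style policy; a hedged sketch of both shapes, following the "(x >> 3) + 100" hint in the comment (the constants are illustrative, not tuned values):

    // Current policy: subtract a fixed penalty, clamping at zero.
    static int additive_decrease(int spinDuration, int penalty /* Knob_Penalty */) {
      spinDuration -= penalty;
      return spinDuration < 0 ? 0 : spinDuration;
    }

    // AIMD-flavoured alternative hinted at above: shed a fraction of the current
    // duration plus a constant, which damps the response as the duration grows.
    static int aimd_decrease(int spinDuration) {
      spinDuration -= (spinDuration >> 3) + 100;
      return spinDuration < 0 ? 0 : spinDuration;
    }
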
 
  Abort:
-   if (MaxSpin >= 0) Adjust(&_Spinner, -1);
-   if (sss && _succ == Self) {
-      _succ = NULL;
-      // Invariant: after setting succ=null a contending thread
-      // must recheck-retry _owner before parking.  This usually happens
-      // in the normal usage of TrySpin(), but it's safest
-      // to make TrySpin() as foolproof as possible.
-      OrderAccess::fence();
-      if (TryLock(Self) > 0) return 1;
-   }
-   return 0;
+  if (MaxSpin >= 0) Adjust(&_Spinner, -1);
+  if (sss && _succ == Self) {
+    _succ = NULL;
+    // Invariant: after setting succ=null a contending thread
+    // must recheck-retry _owner before parking.  This usually happens
+    // in the normal usage of TrySpin(), but it's safest
+    // to make TrySpin() as foolproof as possible.
+    OrderAccess::fence();
+    if (TryLock(Self) > 0) return 1;
+  }
+  return 0;
 }
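
The Abort epilogue relies on a subtle rule spelled out in its comment: once a spinner clears _succ it must re-probe _owner after a full fence before parking, otherwise an exiting owner that saw the successor hint could skip its wakeup and strand this thread. A standalone model of that shape in C++11 atomics (this models the invariant only; it is not the HotSpot primitives):

    #include <atomic>

    // 'owner' models the lock word, 'succ' the heir-presumptive hint.
    // Returns true if the thread acquires the lock while backing out of the spin.
    static bool withdraw_succ_and_retry(std::atomic<void*>& owner,
                                        std::atomic<void*>& succ,
                                        void* self) {
      if (succ.load(std::memory_order_relaxed) == self) {
        succ.store(nullptr, std::memory_order_relaxed);
        // Full fence, then one final acquisition attempt: the exiting owner may
        // have skipped a wakeup on the strength of the hint we just withdrew.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        void* expected = nullptr;
        if (owner.compare_exchange_strong(expected, self)) {
          return true;   // acquired after all; no need to park
        }
      }
      return false;      // caller proceeds to enqueue and park
    }
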
 
 // NotRunnable() -- informed spinning
@@ -2242,29 +2242,29 @@
 
 
 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
-    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
-    if (!OwnerIsThread) return 0;
+  // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+  if (!OwnerIsThread) return 0;
 
-    if (ox == NULL) return 0;
+  if (ox == NULL) return 0;
 
-    // Avoid transitive spinning ...
-    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
-    // Immediately after T1 acquires L it's possible that T2, also
-    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
-    // This occurs transiently after T1 acquired L but before
-    // T1 managed to clear T1.Stalled.  T2 does not need to abort
-    // its spin in this circumstance.
-    intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
+  // Avoid transitive spinning ...
+  // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
+  // Immediately after T1 acquires L it's possible that T2, also
+  // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+  // This occurs transiently after T1 acquired L but before
+  // T1 managed to clear T1.Stalled.  T2 does not need to abort
+  // its spin in this circumstance.
+  intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
 
-    if (BlockedOn == 1) return 1;
-    if (BlockedOn != 0) {
-      return BlockedOn != intptr_t(this) && _owner == ox;
-    }
+  if (BlockedOn == 1) return 1;
+  if (BlockedOn != 0) {
+    return BlockedOn != intptr_t(this) && _owner == ox;
+  }
 
-    assert(sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant");
-    int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);;
-    // consider also: jst != _thread_in_Java -- but that's overspecific.
-    return jst == _thread_blocked || jst == _thread_in_native;
+  assert(sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant");
+  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);
+  // consider also: jst != _thread_in_Java -- but that's overspecific.
+  return jst == _thread_blocked || jst == _thread_in_native;
 }
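
NotRunnable is explicitly heuristic: it samples another thread's _Stalled and _thread_state through fault-tolerant loads (the SafeFetch calls return the supplied fallback value if the access faults), and a wrong answer only makes a spin a little longer or shorter. The decision itself, restated as a standalone function with the unsafe loads replaced by plain parameters (the names are mine):

    #include <cstdint>

    // Inputs model the values NotRunnable obtains via SafeFetchN / SafeFetch32.
    // Returns true ("probably not runnable") when further spinning is unlikely to pay off.
    static bool probably_not_runnable(intptr_t blockedOn,      // ox->_Stalled; 1 if the load faulted
                                      intptr_t thisMonitor,    // (intptr_t) this
                                      bool     stillOwnsThis,  // _owner == ox
                                      bool     blockedOrInNative) {  // sampled thread state
      if (blockedOn == 1) return true;          // couldn't read it: give up on spinning
      if (blockedOn != 0) {
        // The owner is itself stalled on some monitor.  Abort only if that is a
        // *different* monitor and it still holds this one (transitive blocking);
        // the transient self-reference case described above is tolerated.
        return blockedOn != thisMonitor && stillOwnsThis;
      }
      return blockedOrInNative;                 // blocked or in native => not about to release
    }
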
 
 
@@ -2377,27 +2377,27 @@
   assert(InitializationCompleted == 0, "invariant");
   InitializationCompleted = 1;
   if (UsePerfData) {
-      EXCEPTION_MARK;
+    EXCEPTION_MARK;
       #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
       #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      NEWPERFCOUNTER(_sync_Inflations);
-      NEWPERFCOUNTER(_sync_Deflations);
-      NEWPERFCOUNTER(_sync_ContendedLockAttempts);
-      NEWPERFCOUNTER(_sync_FutileWakeups);
-      NEWPERFCOUNTER(_sync_Parks);
-      NEWPERFCOUNTER(_sync_EmptyNotifications);
-      NEWPERFCOUNTER(_sync_Notifications);
-      NEWPERFCOUNTER(_sync_SlowEnter);
-      NEWPERFCOUNTER(_sync_SlowExit);
-      NEWPERFCOUNTER(_sync_SlowNotify);
-      NEWPERFCOUNTER(_sync_SlowNotifyAll);
-      NEWPERFCOUNTER(_sync_FailedSpins);
-      NEWPERFCOUNTER(_sync_SuccessfulSpins);
-      NEWPERFCOUNTER(_sync_PrivateA);
-      NEWPERFCOUNTER(_sync_PrivateB);
-      NEWPERFCOUNTER(_sync_MonInCirculation);
-      NEWPERFCOUNTER(_sync_MonScavenged);
-      NEWPERFVARIABLE(_sync_MonExtant);
+    NEWPERFCOUNTER(_sync_Inflations);
+    NEWPERFCOUNTER(_sync_Deflations);
+    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
+    NEWPERFCOUNTER(_sync_FutileWakeups);
+    NEWPERFCOUNTER(_sync_Parks);
+    NEWPERFCOUNTER(_sync_EmptyNotifications);
+    NEWPERFCOUNTER(_sync_Notifications);
+    NEWPERFCOUNTER(_sync_SlowEnter);
+    NEWPERFCOUNTER(_sync_SlowExit);
+    NEWPERFCOUNTER(_sync_SlowNotify);
+    NEWPERFCOUNTER(_sync_SlowNotifyAll);
+    NEWPERFCOUNTER(_sync_FailedSpins);
+    NEWPERFCOUNTER(_sync_SuccessfulSpins);
+    NEWPERFCOUNTER(_sync_PrivateA);
+    NEWPERFCOUNTER(_sync_PrivateB);
+    NEWPERFCOUNTER(_sync_MonInCirculation);
+    NEWPERFCOUNTER(_sync_MonScavenged);
+    NEWPERFVARIABLE(_sync_MonExtant);
       #undef NEWPERFCOUNTER
   }
 }
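
The NEWPERFCOUNTER / NEWPERFVARIABLE macros above lean on preprocessor stringization: the same token names both the static field being assigned and, via #n, the externally visible counter. A minimal stand-alone illustration of the pattern (the registry and the "sun.rt" namespace string are stand-ins, not PerfDataManager):

    #include <cstdio>

    struct Counter { const char* name; long value; };   // stand-in for the real counter type

    static Counter* create_counter(const char* ns, const char* name) {
      static Counter pool[8];
      static int used = 0;
      pool[used].name  = name;
      pool[used].value = 0;
      std::printf("registered %s.%s\n", ns, name);
      return &pool[used++];
    }

    // Same shape as NEWPERFCOUNTER: one token supplies both the variable and its name.
    #define NEWCOUNTER(n) { n = create_counter("sun.rt", #n); }

    static Counter* _sync_Inflations;
    static Counter* _sync_Deflations;

    int main() {
      NEWCOUNTER(_sync_Inflations);   // registers "sun.rt._sync_Inflations"
      NEWCOUNTER(_sync_Deflations);
      return 0;
    }
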
@@ -2417,33 +2417,33 @@
 
 
 static char * kvGet (char * kvList, const char * Key) {
-    if (kvList == NULL) return NULL;
-    size_t n = strlen(Key);
-    char * Search;
-    for (Search = kvList; *Search; Search += strlen(Search) + 1) {
-        if (strncmp (Search, Key, n) == 0) {
-            if (Search[n] == '=') return Search + n + 1;
-            if (Search[n] == 0)   return(char *) "1";
-        }
+  if (kvList == NULL) return NULL;
+  size_t n = strlen(Key);
+  char * Search;
+  for (Search = kvList; *Search; Search += strlen(Search) + 1) {
+    if (strncmp (Search, Key, n) == 0) {
+      if (Search[n] == '=') return Search + n + 1;
+      if (Search[n] == 0)   return (char *) "1";
     }
-    return NULL;
+  }
+  return NULL;
 }
 
 static int kvGetInt (char * kvList, const char * Key, int Default) {
-    char * v = kvGet(kvList, Key);
-    int rslt = v ? ::strtol(v, NULL, 0) : Default;
-    if (Knob_ReportSettings && v != NULL) {
-        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
-        ::fflush(stdout);
-    }
-    return rslt;
+  char * v = kvGet(kvList, Key);
+  int rslt = v ? ::strtol(v, NULL, 0) : Default;
+  if (Knob_ReportSettings && v != NULL) {
+    ::printf("  SyncKnob: %s %d(%d)\n", Key, rslt, Default);
+    ::fflush(stdout);
+  }
+  return rslt;
 }
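
kvGet and kvGetInt above walk a multi-string: a run of NUL-terminated entries ending in an empty string, where each entry is either "Key=Value" or a bare "Key" (which reads as "1"). DeferredInitialize, below, builds that list by copying SyncKnobs and turning every ':' into a NUL, with the malloc(sz + 2) / knobs[sz+1] = 0 pair supplying the extra terminating NUL. A self-contained sketch of that round trip (the helper name and sample knob string are mine):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Turn "SpinLimit=4096:Verbose" into the NUL-separated, doubly terminated
    // list that kvGet() expects: "SpinLimit=4096\0Verbose\0\0".
    static char* to_kvlist(const char* syncKnobs) {
      size_t sz = strlen(syncKnobs);
      char* knobs = (char*) malloc(sz + 2);
      if (knobs == NULL) return NULL;
      strcpy(knobs, syncKnobs);
      knobs[sz + 1] = 0;                      // second terminator ends the list
      for (char* p = knobs; *p; p++) {
        if (*p == ':') *p = 0;
      }
      return knobs;
    }

    int main() {
      char* kvList = to_kvlist("SpinLimit=4096:Verbose");
      if (kvList == NULL) return 1;
      // Walk the list the same way kvGet() does.
      for (char* s = kvList; *s; s += strlen(s) + 1) {
        printf("entry: %s\n", s);             // "SpinLimit=4096", then "Verbose"
      }
      free(kvList);
      return 0;
    }
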
 
 void ObjectMonitor::DeferredInitialize() {
   if (InitDone > 0) return;
   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
-      while (InitDone != 1);
-      return;
+    while (InitDone != 1);
+    return;
   }
 
   // One-shot global initialization ...
@@ -2457,13 +2457,13 @@
   size_t sz = strlen(SyncKnobs);
   char * knobs = (char *) malloc(sz + 2);
   if (knobs == NULL) {
-     vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
-     guarantee(0, "invariant");
+    vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
+    guarantee(0, "invariant");
   }
   strcpy(knobs, SyncKnobs);
   knobs[sz+1] = 0;
   for (char * p = knobs; *p; p++) {
-     if (*p == ':') *p = 0;
+    if (*p == ':') *p = 0;
   }
 
   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
@@ -2502,18 +2502,18 @@
   }
 
   if (os::is_MP()) {
-     BackOffMask = (1 << Knob_SpinBackOff) - 1;
-     if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
-     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+    BackOffMask = (1 << Knob_SpinBackOff) - 1;
+    if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
+    // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
   } else {
-     Knob_SpinLimit = 0;
-     Knob_SpinBase  = 0;
-     Knob_PreSpin   = 0;
-     Knob_FixedSpin = -1;
+    Knob_SpinLimit = 0;
+    Knob_SpinBase  = 0;
+    Knob_PreSpin   = 0;
+    Knob_FixedSpin = -1;
   }
 
   if (Knob_LogSpins == 0) {
-     ObjectMonitor::_sync_FailedSpins = NULL;
+    ObjectMonitor::_sync_FailedSpins = NULL;
   }
 
   free(knobs);