hotspot/src/share/vm/runtime/mutex.cpp
changeset 41704 14be0ae96c86
parent 40655 9f644073d3a0
child 46644 a5813fb66270
@@ -450,37 +450,37 @@
   }
 
   ParkEvent * const ESelf = Self->_MutexEvent;
   assert(_OnDeck != ESelf, "invariant");
 
-  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
+  // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT
   // Synchronizer.cpp uses a similar optimization.
   if (TrySpin(Self)) goto Exeunt;
 
   // Slow-path - the lock is contended.
   // Either Enqueue Self on cxq or acquire the outer lock.
   // LockWord encoding = (cxq,LOCKBYTE)
   ESelf->reset();
   OrderAccess::fence();
 
   // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
+  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
     goto OnDeck_LOOP;
   }
 
   if (AcquireOrPush(ESelf)) goto Exeunt;
 
   // At any given time there is at most one ondeck thread.
   // ondeck implies not resident on cxq and not resident on EntryList
-  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
+  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   // Deschedule Self so that others may run.
-  while (_OnDeck != ESelf) {
+  while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) {
     ParkCommon(ESelf, 0);
   }
 
-  // Self is now in the ONDECK position and will remain so until it
+  // Self is now in the OnDeck position and will remain so until it
   // manages to acquire the lock.
  OnDeck_LOOP:
   for (;;) {
     assert(_OnDeck == ESelf, "invariant");
     if (TrySpin(Self)) break;
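The hunk above replaces the plain load of _OnDeck in the wait loop with OrderAccess::load_ptr_acquire. A minimal standalone sketch of this waiter-side handoff, using std::atomic and a condition variable as stand-ins for OrderAccess and ParkEvent (all names here are illustrative, not HotSpot's):

#include <atomic>
#include <condition_variable>
#include <mutex>

struct Waiter {                         // stand-in for ParkEvent
  std::mutex m;
  std::condition_variable cv;
  bool unparked = false;
};

std::atomic<Waiter*> on_deck{nullptr};  // stand-in for _OnDeck

void wait_until_on_deck(Waiter* self) {
  // The acquire load pairs with the release store in the unlock path, so
  // everything published before the handoff (e.g. the EntryList unlink)
  // is visible once the waiter sees itself on deck.
  while (on_deck.load(std::memory_order_acquire) != self) {
    std::unique_lock<std::mutex> lk(self->m);     // ParkCommon analogue
    self->cv.wait(lk, [self] { return self->unparked; });
    self->unparked = false;                       // consume the unpark permit
  }
  // Self now holds the unique on-deck position and may contend for the lock.
}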
@@ -499,12 +499,12 @@
   // epilogue immediately after having acquired the outer lock.
   // But instead we could consider the following optimizations:
   // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
   //    This might avoid potential reacquisition of the inner lock in IUnlock().
   // B. While still holding the inner lock, attempt to opportunistically select
-  //    and unlink the next ONDECK thread from the EntryList.
-  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
+  //    and unlink the next OnDeck thread from the EntryList.
+  //    If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
   //    It's critical that the select-and-unlink operation run in constant-time as
   //    it executes when holding the outer lock and may artificially increase the
   //    effective length of the critical section.
   // Note that (A) and (B) are tantamount to succession by direct handoff for
   // the inner lock.
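Optimization (B) amounts to direct handoff: while still holding the inner lock, pop the head of the EntryList in constant time and install it as the on-deck thread. A hypothetical sketch in HotSpot's own idiom (this code is not part of the changeset; EntryList handling is simplified):

// Hypothetical sketch of optimization (B): constant-time select-and-unlink
// of a successor while still holding the inner lock.  Popping the head of
// _EntryList is O(1), satisfying the constant-time requirement above.
ParkEvent * w = _EntryList;
if (w != NULL) {
  _EntryList = w->ListNext;                      // unlink the list head
  OrderAccess::release_store_ptr(&_OnDeck, w);   // direct handoff to w
  w->unpark();
} else {
  OrderAccess::release_store_ptr(&_OnDeck, (ParkEvent *) NULL);  // clear OnDeck
}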
@@ -527,19 +527,19 @@
   // provides for progress conditions and succession and is _not related to exclusion
   // safety or lock release consistency.
   OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
 
   OrderAccess::storeload();
-  ParkEvent * const w = _OnDeck;
+  ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
   assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
   if (w != NULL) {
     // Either we have a valid ondeck thread or ondeck is transiently "locked"
     // by some exiting thread as it arranges for succession.  The LSBit of
     // OnDeck allows us to discriminate two cases.  If the latter, the
     // responsibility for progress and succession lies with that other thread.
     // For good performance, we also depend on the fact that redundant unpark()
-    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
+    // operations are cheap.  That is, repeated Unpark()ing of the OnDeck thread
     // is inexpensive.  This approach provides implicit futile wakeup throttling.
     // Note that the referent "w" might be stale with respect to the lock.
     // In that case the following unpark() is harmless and the worst that'll happen
     // is a spurious return from a park() operation.  Critically, if "w" _is stale,
     // then progress is known to have occurred as that means the thread associated
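The release_store that drops the outer lock, followed by OrderAccess::storeload(), is the classic store-then-load pattern: without the full fence, the unlocking store and the subsequent _OnDeck load could be reordered, and the exiting thread and a concurrent locker could each conclude the other will provide succession, stranding a waiter. A standalone sketch with std::atomic stand-ins (names are illustrative, not HotSpot's):

#include <atomic>
#include <cstdint>

std::atomic<uint8_t> lock_byte{1};      // stand-in for the LockWord's lock byte
std::atomic<void*>   on_deck{nullptr};  // stand-in for _OnDeck

void unlock_epilogue() {
  lock_byte.store(0, std::memory_order_release);        // drop the outer lock
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
  // A raw (relaxed) load suffices here: if a successor is visible we only
  // unpark it, and a redundant unpark of a stale successor is harmless --
  // at worst a spurious return from park().
  void* w = on_deck.load(std::memory_order_relaxed);
  if (w != nullptr) {
    // Unpark the on-deck thread (omitted); if the low bit marks OnDeck as
    // transiently "locked", succession is that other exiter's responsibility.
  }
}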
@@ -584,12 +584,17 @@
     ParkEvent * const w = List;
     assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
     _EntryList = w->ListNext;
     // as a diagnostic measure consider setting w->_ListNext = BAD
     assert(UNS(_OnDeck) == _LBIT, "invariant");
-    _OnDeck = w;  // pass OnDeck to w.
-                  // w will clear OnDeck once it acquires the outer lock
+
+    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
+    // w will clear _OnDeck once it acquires the outer lock.
+    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
+    // with its critical section and then enter this code to unlock the mutex. So
+    // you can have multiple threads active in IUnlock at the same time.
+    OrderAccess::release_store_ptr(&_OnDeck, w);
 
     // Another optional optimization ...
     // For heavily contended locks it's not uncommon that some other
     // thread acquired the lock while this thread was arranging succession.
     // Try to defer the unpark() operation - Delegate the responsibility
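The key fix in this hunk: _OnDeck is now published with OrderAccess::release_store_ptr, so the _EntryList unlink is guaranteed to be visible before the successor can observe itself on deck, acquire the mutex, and re-enter IUnlock concurrently. The same invariant in portable form (a sketch under those assumptions, not HotSpot code):

#include <atomic>

struct Event { Event* list_next = nullptr; };  // stand-in for ParkEvent

Event*              entry_list = nullptr;      // protected by the inner lock
std::atomic<Event*> on_deck{nullptr};          // stand-in for _OnDeck

void pass_on_deck(Event* w) {
  entry_list = w->list_next;  // unlink w; must happen-before the publish
  // The release store pairs with the waiter's acquire load: once w sees
  // itself on deck it may acquire the mutex and run the unlock path, so
  // the entry_list update above must already be visible to it.
  on_deck.store(w, std::memory_order_release);
}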
@@ -833,11 +838,11 @@
   } else {
     // A prior notify() operation moved ESelf from the WaitSet to the cxq.
     // ESelf is now on the cxq, EntryList or at the OnDeck position.
     // The following fragment is extracted from Monitor::ILock()
     for (;;) {
-      if (_OnDeck == ESelf && TrySpin(Self)) break;
+      if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
       ParkCommon(ESelf, 0);
     }
     assert(_OnDeck == ESelf, "invariant");
     _OnDeck = NULL;
   }
@@ -1048,14 +1053,14 @@
     goto Exeunt;
   }
 
   // At any given time there is at most one ondeck thread.
   // ondeck implies not resident on cxq and not resident on EntryList
-  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
+  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   for (;;) {
-    if (_OnDeck == ESelf && TrySpin(NULL)) break;
+    if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
     ParkCommon(ESelf, 0);
   }
 
   assert(_OnDeck == ESelf, "invariant");
   _OnDeck = NULL;