@@ -243,11 +243,11 @@
 void ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;

-  void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+  void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
   if (cur == NULL) {
     assert(_recursions == 0, "invariant");
     return;
   }

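The only change in this hunk (and in every hunk below) is the argument order of the atomic primitives: the destination pointer now comes first, so a call reads as cmpxchg(dest, compare_value, exchange_value). A minimal sketch of the contract the call sites rely on, written against std::atomic rather than HotSpot's Atomic class (cmpxchg_sketch is an illustrative name, not HotSpot code):

    #include <atomic>

    // Store exchange_value into *dest iff *dest == compare_value, and return
    // whatever *dest held beforehand, so a return equal to compare_value
    // means the CAS succeeded.
    template <typename T>
    T cmpxchg_sketch(std::atomic<T>* dest, T compare_value, T exchange_value) {
      T observed = compare_value;
      // On failure compare_exchange_strong loads the current value into
      // 'observed'; on success 'observed' keeps the expected value.
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;
    }

Under that reading, the enter() fast path above takes ownership exactly when cur comes back NULL.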
@@ -401,11 +401,11 @@
 // Callers must compensate as needed.

 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::replace_if_null(Self, &_owner)) {
+  if (Atomic::replace_if_null(&_owner, Self)) {
     assert(_recursions == 0, "invariant");
     return 1;
   }
   // The lock had been free momentarily, but we lost the race to the lock.
   // Interference -- the CAS failed.
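Atomic::replace_if_null gets the same reordering: destination first, then the new value. As TryLock uses it, it behaves as a CAS from NULL that reports success as a bool; a sketch under that assumption (replace_if_null_sketch is a made-up name):

    #include <atomic>

    // Succeed, installing 'value', only when *dest is currently NULL.
    template <typename T>
    bool replace_if_null_sketch(std::atomic<T*>* dest, T* value) {
      T* expected = nullptr;
      return dest->compare_exchange_strong(expected, value);
    }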
@@ -478,11 +478,11 @@
   // Note that spinning tends to reduce the rate at which threads
   // enqueue and dequeue on EntryList|cxq.
   ObjectWaiter * nxt;
   for (;;) {
     node._next = nxt = _cxq;
-    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
+    if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;

     // Interference - the CAS failed because _cxq changed. Just retry.
     // As an optional optimization we retry the lock.
     if (TryLock (Self) > 0) {
       assert(_succ != Self, "invariant");
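The loop above is the standard lock-free push onto a LIFO list: link the new node in front of the observed head, then try to swing the head pointer. With the new argument order the call reads as "change _cxq from nxt to &node". A standalone sketch of the pattern; Waiter and push_sketch are illustrative stand-ins, not the ObjectWaiter type:

    #include <atomic>

    struct Waiter {
      std::atomic<Waiter*> next{nullptr};
    };

    // Publish 'node' as the new head only while the head is still the node
    // we linked behind; compare_exchange_weak refreshes 'observed' on
    // failure, so the loop re-links and retries, like the cmpxchg loop above.
    void push_sketch(std::atomic<Waiter*>& head, Waiter* node) {
      Waiter* observed = head.load();
      do {
        node->next.store(observed, std::memory_order_relaxed);
      } while (!head.compare_exchange_weak(observed, node));
    }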
@@ -516,11 +516,11 @@
   // -- the checker -- parked on a timer.

   if (nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::replace_if_null(Self, &_Responsible);
+    Atomic::replace_if_null(&_Responsible, Self);
   }

   // The lock might have been released while this thread was occupied queueing
   // itself onto _cxq. To close the race and avoid "stranding" and
   // progress-liveness failure we must resample-retry _owner before parking.
@@ -771,11 +771,11 @@
     // and then unlink Self from EntryList. We have to drain eventually,
     // so it might as well be now.

     ObjectWaiter * v = _cxq;
     assert(v != NULL, "invariant");
-    if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
+    if (v != SelfNode || Atomic::cmpxchg(&_cxq, v, SelfNode->_next) != v) {
       // The CAS above can fail from interference IFF a "RAT" arrived.
       // In that case Self must be in the interior and can no longer be
       // at the head of cxq.
       if (v == SelfNode) {
         assert(_cxq != v, "invariant");
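This hunk is the inverse of the push: remove SelfNode from _cxq only if it is still at the head. A concurrently arriving thread (the "RAT" in the comment) pushes a new head and makes the CAS fail, leaving SelfNode in the interior for the slower unlink path. A sketch of the pop-if-head step, reusing the illustrative Waiter type from the push sketch:

    // Detach 'self' iff it is still the head; returns false when an
    // interleaving push has displaced it into the interior of the list.
    bool unlink_head_sketch(std::atomic<Waiter*>& head, Waiter* self) {
      Waiter* expected = self;
      return head.compare_exchange_strong(expected, self->next.load());
    }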
@@ -957,11 +957,11 @@
     // Only the current lock owner can manipulate the EntryList or
     // drain _cxq, so we need to reacquire the lock. If we fail
     // to reacquire the lock the responsibility for ensuring succession
     // falls to the new owner.
     //
-    if (!Atomic::replace_if_null(THREAD, &_owner)) {
+    if (!Atomic::replace_if_null(&_owner, THREAD)) {
       return;
     }

     guarantee(_owner == THREAD, "invariant");

@@ -993,11 +993,11 @@
     // Drain _cxq into EntryList - bulk transfer.
     // First, detach _cxq.
     // The following loop is tantamount to: w = swap(&cxq, NULL)
     for (;;) {
       assert(w != NULL, "Invariant");
-      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
+      ObjectWaiter * u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
       if (u == w) break;
       w = u;
     }

     assert(w != NULL, "invariant");
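As the comment in the hunk says, the loop is tantamount to w = swap(&cxq, NULL): it re-CASes against the freshly observed head until one attempt takes the whole chain in a single step. std::atomic expresses the same detach in one call; a sketch with the illustrative Waiter type:

    // One-shot equivalent of the cmpxchg loop above: atomically take the
    // entire chain and leave NULL behind.
    Waiter* detach_all_sketch(std::atomic<Waiter*>& head) {
      return head.exchange(nullptr);
    }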
@@ -1457,11 +1457,11 @@
     } else {
       iterator->TState = ObjectWaiter::TS_CXQ;
       for (;;) {
         ObjectWaiter * front = _cxq;
         iterator->_next = front;
-        if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
+        if (Atomic::cmpxchg(&_cxq, front, iterator) == front) {
           break;
         }
       }
     }

@@ -1678,11 +1678,11 @@
     // the spin without prejudice or apply a "penalty" to the
     // spin count-down variable "ctr", reducing it by 100, say.

     Thread * ox = (Thread *) _owner;
     if (ox == NULL) {
-      ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+      ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
       if (ox == NULL) {
         // The CAS succeeded -- this thread acquired ownership
         // Take care of some bookkeeping to exit spin state.
         if (_succ == Self) {
           _succ = NULL;
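The spin path has the test-and-test-and-set shape: a plain load of _owner first, and the CAS only when the lock looks free, which keeps spinning threads on a read-shared cache line (the RTS->RTO concern mentioned at the top of enter()). A sketch of that step; try_acquire_sketch is a made-up name:

    #include <atomic>

    // Cheap read first, expensive CAS only when the owner slot appears empty.
    bool try_acquire_sketch(std::atomic<void*>& owner, void* self) {
      if (owner.load(std::memory_order_relaxed) != nullptr) return false;
      void* expected = nullptr;
      return owner.compare_exchange_strong(expected, self);
    }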