265 |
265 |
266 |
266 |
// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
269 |
269 |
// Compare-and-swap a pointer-sized word, returning the value observed at *a.
// Argument order is (address, compare-value, set-value); the underlying
// Atomic::cmpxchg_ptr() takes (set, address, compare) - see comment above.
#define CASPTR(a, c, s) \
  intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))

// Strip type information: view any pointer or integral as a uintptr_t.
#define UNS(x) (uintptr_t(x))

// Exponential-backoff diagnostic trace: prints the site tag only when the
// per-site hit count is a power of two (x & (x-1) == 0), so hot probes stay
// cheap.  NOTE(review): ++ctr on a volatile int is not atomic, so counts are
// best-effort under concurrency - presumably acceptable for a debug trace.
// Wrapped in do { } while (0) so TRACE(m); behaves as a single statement and
// is safe inside an unbraced if/else (the old brace-only form was not).
#define TRACE(m)                              \
  do {                                        \
    static volatile int ctr = 0;              \
    int x = ++ctr;                            \
    if ((x & (x - 1)) == 0) {                 \
      ::printf("%d:%s\n", x, #m);             \
      ::fflush(stdout);                       \
    }                                         \
  } while (0)
273 |
282 |
// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.
277 |
286 |
278 static inline jint MarsagliaXORV (jint x) { |
287 static inline jint MarsagliaXORV(jint x) { |
279 if (x == 0) x = 1|os::random(); |
288 if (x == 0) x = 1|os::random(); |
280 x ^= x << 6; |
289 x ^= x << 6; |
281 x ^= ((unsigned)x) >> 21; |
290 x ^= ((unsigned)x) >> 21; |
282 x ^= x << 7; |
291 x ^= x << 7; |
283 return x & 0x7FFFFFFF; |
292 return x & 0x7FFFFFFF; |
284 } |
293 } |
285 |
294 |
286 static int Stall (int its) { |
295 static int Stall(int its) { |
287 static volatile jint rv = 1; |
296 static volatile jint rv = 1; |
288 volatile int OnFrame = 0; |
297 volatile int OnFrame = 0; |
289 jint v = rv ^ UNS(OnFrame); |
298 jint v = rv ^ UNS(OnFrame); |
290 while (--its >= 0) { |
299 while (--its >= 0) { |
291 v = MarsagliaXORV(v); |
300 v = MarsagliaXORV(v); |
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.
343 |
352 |
344 int Monitor::TrySpin (Thread * const Self) { |
353 int Monitor::TrySpin(Thread * const Self) { |
345 if (TryLock()) return 1; |
354 if (TryLock()) return 1; |
346 if (!os::is_MP()) return 0; |
355 if (!os::is_MP()) return 0; |
347 |
356 |
348 int Probes = 0; |
357 int Probes = 0; |
349 int Delay = 0; |
358 int Delay = 0; |
416 err = ev->park(timo); |
425 err = ev->park(timo); |
417 } |
426 } |
418 return err; |
427 return err; |
419 } |
428 } |
420 |
429 |
421 inline int Monitor::AcquireOrPush (ParkEvent * ESelf) { |
430 inline int Monitor::AcquireOrPush(ParkEvent * ESelf) { |
422 intptr_t v = _LockWord.FullWord; |
431 intptr_t v = _LockWord.FullWord; |
423 for (;;) { |
432 for (;;) { |
424 if ((v & _LBIT) == 0) { |
433 if ((v & _LBIT) == 0) { |
425 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); |
434 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); |
426 if (u == v) return 1; // indicate acquired |
435 if (u == v) return 1; // indicate acquired |
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.
445 |
454 |
446 void Monitor::ILock (Thread * Self) { |
455 void Monitor::ILock(Thread * Self) { |
447 assert(_OnDeck != Self->_MutexEvent, "invariant"); |
456 assert(_OnDeck != Self->_MutexEvent, "invariant"); |
448 |
457 |
449 if (TryFast()) { |
458 if (TryFast()) { |
450 Exeunt: |
459 Exeunt: |
451 assert(ILocked(), "invariant"); |
460 assert(ILocked(), "invariant"); |
512 // Note that (A) and (B) are tantamount to succession by direct handoff for |
521 // Note that (A) and (B) are tantamount to succession by direct handoff for |
513 // the inner lock. |
522 // the inner lock. |
514 goto Exeunt; |
523 goto Exeunt; |
515 } |
524 } |
516 |
525 |
517 void Monitor::IUnlock (bool RelaxAssert) { |
526 void Monitor::IUnlock(bool RelaxAssert) { |
518 assert(ILocked(), "invariant"); |
527 assert(ILocked(), "invariant"); |
519 // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately |
528 // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately |
520 // before the store that releases the lock. Crucially, all the stores and loads in the |
529 // before the store that releases the lock. Crucially, all the stores and loads in the |
521 // critical section must be globally visible before the store of 0 into the lock-word |
530 // critical section must be globally visible before the store of 0 into the lock-word |
522 // that releases the lock becomes globally visible. That is, memory accesses in the |
531 // that releases the lock becomes globally visible. That is, memory accesses in the |
587 ParkEvent * const w = List; |
596 ParkEvent * const w = List; |
588 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant"); |
597 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant"); |
589 _EntryList = w->ListNext; |
598 _EntryList = w->ListNext; |
590 // as a diagnostic measure consider setting w->_ListNext = BAD |
599 // as a diagnostic measure consider setting w->_ListNext = BAD |
591 assert(UNS(_OnDeck) == _LBIT, "invariant"); |
600 assert(UNS(_OnDeck) == _LBIT, "invariant"); |
592 _OnDeck = w; // pass OnDeck to w. |
601 _OnDeck = w; // pass OnDeck to w. |
593 // w will clear OnDeck once it acquires the outer lock |
602 // w will clear OnDeck once it acquires the outer lock |
594 |
603 |
595 // Another optional optimization ... |
604 // Another optional optimization ... |
596 // For heavily contended locks it's not uncommon that some other |
605 // For heavily contended locks it's not uncommon that some other |
597 // thread acquired the lock while this thread was arranging succession. |
606 // thread acquired the lock while this thread was arranging succession. |
598 // Try to defer the unpark() operation - Delegate the responsibility |
607 // Try to defer the unpark() operation - Delegate the responsibility |
722 assert(ILocked(), "invariant"); |
731 assert(ILocked(), "invariant"); |
723 while (_WaitSet != NULL) notify(); |
732 while (_WaitSet != NULL) notify(); |
724 return true; |
733 return true; |
725 } |
734 } |
726 |
735 |
727 int Monitor::IWait (Thread * Self, jlong timo) { |
736 int Monitor::IWait(Thread * Self, jlong timo) { |
728 assert(ILocked(), "invariant"); |
737 assert(ILocked(), "invariant"); |
729 |
738 |
730 // Phases: |
739 // Phases: |
731 // 1. Enqueue Self on WaitSet - currently prepend |
740 // 1. Enqueue Self on WaitSet - currently prepend |
732 // 2. unlock - drop the outer lock |
741 // 2. unlock - drop the outer lock |
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.
887 |
896 |
888 void Monitor::lock (Thread * Self) { |
897 void Monitor::lock(Thread * Self) { |
889 #ifdef CHECK_UNHANDLED_OOPS |
898 #ifdef CHECK_UNHANDLED_OOPS |
890 // Clear unhandled oops so we get a crash right away. Only clear for non-vm |
899 // Clear unhandled oops so we get a crash right away. Only clear for non-vm |
891 // or GC threads. |
900 // or GC threads. |
892 if (Self->is_Java_thread()) { |
901 if (Self->is_Java_thread()) { |
893 Self->clear_unhandled_oops(); |
902 Self->clear_unhandled_oops(); |
894 } |
903 } |
895 #endif // CHECK_UNHANDLED_OOPS |
904 #endif // CHECK_UNHANDLED_OOPS |
896 |
905 |
897 debug_only(check_prelock_state(Self)); |
906 debug_only(check_prelock_state(Self)); |
898 assert(_owner != Self , "invariant"); |
907 assert(_owner != Self, "invariant"); |
899 assert(_OnDeck != Self->_MutexEvent, "invariant"); |
908 assert(_OnDeck != Self->_MutexEvent, "invariant"); |
900 |
909 |
901 if (TryFast()) { |
910 if (TryFast()) { |
902 Exeunt: |
911 Exeunt: |
903 assert(ILocked(), "invariant"); |
912 assert(ILocked(), "invariant"); |
// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!
945 |
954 |
946 void Monitor::lock_without_safepoint_check (Thread * Self) { |
955 void Monitor::lock_without_safepoint_check(Thread * Self) { |
947 assert(_owner != Self, "invariant"); |
956 assert(_owner != Self, "invariant"); |
948 ILock(Self); |
957 ILock(Self); |
949 assert(_owner == NULL, "invariant"); |
958 assert(_owner == NULL, "invariant"); |
950 set_owner(Self); |
959 set_owner(Self); |
951 } |
960 } |
981 } |
990 } |
982 return false; |
991 return false; |
983 } |
992 } |
984 |
993 |
985 void Monitor::unlock() { |
994 void Monitor::unlock() { |
986 assert(_owner == Thread::current(), "invariant"); |
995 assert(_owner == Thread::current(), "invariant"); |
987 assert(_OnDeck != Thread::current()->_MutexEvent , "invariant"); |
996 assert(_OnDeck != Thread::current()->_MutexEvent, "invariant"); |
988 set_owner(NULL); |
997 set_owner(NULL); |
989 if (_snuck) { |
998 if (_snuck) { |
990 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak"); |
999 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak"); |
991 _snuck = false; |
1000 _snuck = false; |
992 return; |
1001 return; |
1069 return; |
1078 return; |
1070 } |
1079 } |
1071 IUnlock(false); |
1080 IUnlock(false); |
1072 } |
1081 } |
1073 |
1082 |
1074 bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) { |
1083 bool Monitor::wait(bool no_safepoint_check, long timeout, |
|
1084 bool as_suspend_equivalent) { |
1075 Thread * const Self = Thread::current(); |
1085 Thread * const Self = Thread::current(); |
1076 assert(_owner == Self, "invariant"); |
1086 assert(_owner == Self, "invariant"); |
1077 assert(ILocked(), "invariant"); |
1087 assert(ILocked(), "invariant"); |
1078 |
1088 |
1079 // as_suspend_equivalent logically implies !no_safepoint_check |
1089 // as_suspend_equivalent logically implies !no_safepoint_check |
1138 |
1148 |
1139 Monitor::~Monitor() { |
1149 Monitor::~Monitor() { |
1140 assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, ""); |
1150 assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, ""); |
1141 } |
1151 } |
1142 |
1152 |
1143 void Monitor::ClearMonitor (Monitor * m, const char *name) { |
1153 void Monitor::ClearMonitor(Monitor * m, const char *name) { |
1144 m->_owner = NULL; |
1154 m->_owner = NULL; |
1145 m->_snuck = false; |
1155 m->_snuck = false; |
1146 if (name == NULL) { |
1156 if (name == NULL) { |
1147 strcpy(m->_name, "UNKNOWN"); |
1157 strcpy(m->_name, "UNKNOWN"); |
1148 } else { |
1158 } else { |
1156 m->_WaitLock[0] = 0; |
1166 m->_WaitLock[0] = 0; |
1157 } |
1167 } |
1158 |
1168 |
1159 Monitor::Monitor() { ClearMonitor(this); } |
1169 Monitor::Monitor() { ClearMonitor(this); } |
1160 |
1170 |
1161 Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) { |
1171 Monitor::Monitor(int Rank, const char * name, bool allow_vm_block) { |
1162 ClearMonitor(this, name); |
1172 ClearMonitor(this, name); |
1163 #ifdef ASSERT |
1173 #ifdef ASSERT |
1164 _allow_vm_block = allow_vm_block; |
1174 _allow_vm_block = allow_vm_block; |
1165 _rank = Rank; |
1175 _rank = Rank; |
1166 #endif |
1176 #endif |
1168 |
1178 |
1169 Mutex::~Mutex() { |
1179 Mutex::~Mutex() { |
1170 assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, ""); |
1180 assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, ""); |
1171 } |
1181 } |
1172 |
1182 |
1173 Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) { |
1183 Mutex::Mutex(int Rank, const char * name, bool allow_vm_block) { |
1174 ClearMonitor((Monitor *) this, name); |
1184 ClearMonitor((Monitor *) this, name); |
1175 #ifdef ASSERT |
1185 #ifdef ASSERT |
1176 _allow_vm_block = allow_vm_block; |
1186 _allow_vm_block = allow_vm_block; |
1177 _rank = Rank; |
1187 _rank = Rank; |
1178 #endif |
1188 #endif |
1277 assert(_owner == NULL, "setting the owner thread of an already owned mutex"); |
1288 assert(_owner == NULL, "setting the owner thread of an already owned mutex"); |
1278 _owner = new_owner; // set the owner |
1289 _owner = new_owner; // set the owner |
1279 |
1290 |
1280 // link "this" into the owned locks list |
1291 // link "this" into the owned locks list |
1281 |
1292 |
1282 #ifdef ASSERT // Thread::_owned_locks is under the same ifdef |
1293 #ifdef ASSERT // Thread::_owned_locks is under the same ifdef |
1283 Monitor* locks = get_least_ranked_lock(new_owner->owned_locks()); |
1294 Monitor* locks = get_least_ranked_lock(new_owner->owned_locks()); |
1284 // Mutex::set_owner_implementation is a friend of Thread |
1295 // Mutex::set_owner_implementation is a friend of Thread |
1285 |
1296 |
1286 assert(this->rank() >= 0, "bad lock rank"); |
1297 assert(this->rank() >= 0, "bad lock rank"); |
1287 |
1298 |
1310 locks->name(), locks->rank())); |
1321 locks->name(), locks->rank())); |
1311 } |
1322 } |
1312 |
1323 |
1313 this->_next = new_owner->_owned_locks; |
1324 this->_next = new_owner->_owned_locks; |
1314 new_owner->_owned_locks = this; |
1325 new_owner->_owned_locks = this; |
1315 #endif |
1326 #endif |
1316 |
1327 |
1317 } else { |
1328 } else { |
1318 // the thread is releasing this lock |
1329 // the thread is releasing this lock |
1319 |
1330 |
1320 Thread* old_owner = _owner; |
1331 Thread* old_owner = _owner; |
1323 assert(old_owner != NULL, "removing the owner thread of an unowned mutex"); |
1334 assert(old_owner != NULL, "removing the owner thread of an unowned mutex"); |
1324 assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex"); |
1335 assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex"); |
1325 |
1336 |
1326 _owner = NULL; // set the owner |
1337 _owner = NULL; // set the owner |
1327 |
1338 |
1328 #ifdef ASSERT |
1339 #ifdef ASSERT |
1329 Monitor *locks = old_owner->owned_locks(); |
1340 Monitor *locks = old_owner->owned_locks(); |
1330 |
1341 |
1331 // remove "this" from the owned locks list |
1342 // remove "this" from the owned locks list |
1332 |
1343 |
1333 Monitor *prev = NULL; |
1344 Monitor *prev = NULL; |