hotspot/src/share/vm/runtime/objectMonitor.cpp
changeset 27165 785a8d56024c
parent 26684 d1221849ea3d
child 31782 b23b74f8ae8d
27164:6523fa019ffa 27165:785a8d56024c
   255   if (THREAD != _owner) {
   255   if (THREAD != _owner) {
   256     if (THREAD->is_lock_owned ((address)_owner)) {
   256     if (THREAD->is_lock_owned ((address)_owner)) {
   257       assert(_recursions == 0, "internal state error");
   257       assert(_recursions == 0, "internal state error");
   258       _owner = THREAD;
   258       _owner = THREAD;
   259       _recursions = 1;
   259       _recursions = 1;
   260       OwnerIsThread = 1;
       
   261       return true;
   260       return true;
   262     }
   261     }
   263     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
   262     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
   264       return false;
   263       return false;
   265     }
   264     }
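The fast path above either converts a stack-lock owner into a full thread owner or installs THREAD as _owner with a single compare-and-swap; only if that CAS fails does the caller report failure. A minimal sketch of the same acquire shape, using std::atomic instead of HotSpot's Atomic::cmpxchg_ptr (the ToyMonitor type and its fields are invented for illustration):

    #include <atomic>

    // Illustrative try-enter: one CAS from nullptr to the acquiring thread.
    struct ToyMonitor {
      std::atomic<void*> _owner{nullptr};   // nullptr means unowned
      int _recursions = 0;

      bool try_enter(void* self) {
        if (_owner.load(std::memory_order_relaxed) == self) {
          _recursions++;                     // recursive re-entry
          return true;
        }
        void* expected = nullptr;
        // Success publishes ownership; failure means another thread owns it.
        return _owner.compare_exchange_strong(expected, self,
                                              std::memory_order_acquire);
      }
    };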
   278   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   277   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   279   if (cur == NULL) {
   278   if (cur == NULL) {
   280     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
   279     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
   281     assert(_recursions == 0, "invariant");
   280     assert(_recursions == 0, "invariant");
   282     assert(_owner == Self, "invariant");
   281     assert(_owner == Self, "invariant");
   283     // CONSIDER: set or assert OwnerIsThread == 1
       
   284     return;
   282     return;
   285   }
   283   }
   286 
   284 
   287   if (cur == Self) {
   285   if (cur == Self) {
   288     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
   286     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
   294     assert(_recursions == 0, "internal state error");
   292     assert(_recursions == 0, "internal state error");
   295     _recursions = 1;
   293     _recursions = 1;
   296     // Commute owner from a thread-specific on-stack BasicLockObject address to
   294     // Commute owner from a thread-specific on-stack BasicLockObject address to
   297     // a full-fledged "Thread *".
   295     // a full-fledged "Thread *".
   298     _owner = Self;
   296     _owner = Self;
   299     OwnerIsThread = 1;
       
   300     return;
   297     return;
   301   }
   298   }
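The TODO above (BUGID 6557169) worries that repeated re-entry could overflow the recursion counter. A hedged sketch of what a checked increment could look like; this is only an illustration of the concern, not the VM's fix:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Illustrative only: bump a recursion counter while refusing to wrap.
    inline void bump_recursions(std::intptr_t* recursions) {
      assert(*recursions >= 0 && "recursion count must be non-negative");
      assert(*recursions < std::numeric_limits<std::intptr_t>::max() &&
             "recursion counter would overflow");
      *recursions += 1;
    }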
   302 
   299 
   303   // We've encountered genuine contention.
   300   // We've encountered genuine contention.
   304   assert(Self->_Stalled == 0, "invariant");
   301   assert(Self->_Stalled == 0, "invariant");
   326   assert(this->object() != NULL, "invariant");
   323   assert(this->object() != NULL, "invariant");
   327   assert(_count >= 0, "invariant");
   324   assert(_count >= 0, "invariant");
   328 
   325 
   329   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
   326   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
   330   // Ensure the object-monitor relationship remains stable while there's contention.
   327   // Ensure the object-monitor relationship remains stable while there's contention.
   331   Atomic::inc_ptr(&_count);
   328   Atomic::inc(&_count);
   332 
   329 
   333   EventJavaMonitorEnter event;
   330   EventJavaMonitorEnter event;
   334 
   331 
   335   { // Change java thread status to indicate blocked on monitor enter.
   332   { // Change java thread status to indicate blocked on monitor enter.
   336     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
   333     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
   382     // as having "-locked" the monitor, but the OS and java.lang.Thread
   379     // as having "-locked" the monitor, but the OS and java.lang.Thread
   383     // states will still report that the thread is blocked trying to
   380     // states will still report that the thread is blocked trying to
   384     // acquire it.
   381     // acquire it.
   385   }
   382   }
   386 
   383 
   387   Atomic::dec_ptr(&_count);
   384   Atomic::dec(&_count);
   388   assert(_count >= 0, "invariant");
   385   assert(_count >= 0, "invariant");
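The Atomic::inc(&_count) before the blocking section and the Atomic::dec(&_count) after it bracket the contended enter so that deflate_idle_monitors() sees the monitor as busy and leaves it alone. A sketch of the same protect/unprotect idea expressed as an RAII guard (ContentionGuard is a hypothetical name, not a HotSpot class):

    #include <atomic>

    // Hypothetical guard: keeps a monitor's busy counter raised for the
    // duration of a contended enter so a deflater will skip the monitor.
    class ContentionGuard {
      std::atomic<int>& _count;
     public:
      explicit ContentionGuard(std::atomic<int>& count) : _count(count) {
        _count.fetch_add(1, std::memory_order_acq_rel);  // mirrors Atomic::inc(&_count)
      }
      ~ContentionGuard() {
        _count.fetch_sub(1, std::memory_order_acq_rel);  // mirrors Atomic::dec(&_count)
      }
    };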
   389   Self->_Stalled = 0;
   386   Self->_Stalled = 0;
   390 
   387 
   391   // Must either set _recursions = 0 or ASSERT _recursions == 0.
   388   // Must either set _recursions = 0 or ASSERT _recursions == 0.
   392   assert(_recursions == 0, "invariant");
   389   assert(_recursions == 0, "invariant");
   438   if (own != NULL) return 0;
   435   if (own != NULL) return 0;
   439   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
   436   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
   440     // Either guarantee _recursions == 0 or set _recursions = 0.
   437     // Either guarantee _recursions == 0 or set _recursions = 0.
   441     assert(_recursions == 0, "invariant");
   438     assert(_recursions == 0, "invariant");
   442     assert(_owner == Self, "invariant");
   439     assert(_owner == Self, "invariant");
   443     // CONSIDER: set or assert that OwnerIsThread == 1
       
   444     return 1;
   440     return 1;
   445   }
   441   }
   446   // The lock had been free momentarily, but we lost the race to the lock.
   442   // The lock had been free momentarily, but we lost the race to the lock.
   447   // Interference -- the CAS failed.
   443   // Interference -- the CAS failed.
   448   // We can either return -1 or retry.
   444   // We can either return -1 or retry.
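TryLock-style primitives in this file distinguish three outcomes: the lock was acquired, the lock was visibly owned by someone else, or the CAS itself lost a race while the lock looked free. A hedged sketch of how a caller might consume that contract (the retry bound and helper names are invented):

    // Illustrative caller of a tri-state try-lock:
    //   +1 = acquired, 0 = owned by another thread, -1 = CAS interference.
    bool spin_then_block(int (*try_lock)(), void (*block_and_acquire)()) {
      for (int attempts = 0; attempts < 100; attempts++) {   // arbitrary bound
        int rc = try_lock();
        if (rc > 0) return true;   // acquired on the fast path
        if (rc == 0) break;        // genuinely contended: stop spinning
        // rc < 0: the lock looked free but the CAS failed; retry briefly.
      }
      block_and_acquire();         // slow path: park until the lock is handed over
      return true;
    }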
   920       // Non-null to Non-null is safe as long as all readers can
   916       // Non-null to Non-null is safe as long as all readers can
   921       // tolerate either flavor.
   917       // tolerate either flavor.
   922       assert(_recursions == 0, "invariant");
   918       assert(_recursions == 0, "invariant");
   923       _owner = THREAD;
   919       _owner = THREAD;
   924       _recursions = 0;
   920       _recursions = 0;
   925       OwnerIsThread = 1;
       
   926     } else {
   921     } else {
   927       // Apparent unbalanced locking ...
   922       // Apparent unbalanced locking ...
   928       // Naively we'd like to throw IllegalMonitorStateException.
   923       // Naively we'd like to throw IllegalMonitorStateException.
   929       // As a practical matter we can neither allocate nor throw an
   924       // As a practical matter we can neither allocate nor throw an
   930       // exception as ::exit() can be called from leaf routines.
   925       // exception as ::exit() can be called from leaf routines.
  1344   if (THREAD != _owner) {
  1339   if (THREAD != _owner) {
  1345     if (THREAD->is_lock_owned ((address)_owner)) {
  1340     if (THREAD->is_lock_owned ((address)_owner)) {
  1346       assert(_recursions == 0, "internal state error");
  1341       assert(_recursions == 0, "internal state error");
  1347       _owner = THREAD;   // Convert from basiclock addr to Thread addr
  1342       _owner = THREAD;   // Convert from basiclock addr to Thread addr
  1348       _recursions = 0;
  1343       _recursions = 0;
  1349       OwnerIsThread = 1;
       
  1350     }
  1344     }
  1351   }
  1345   }
  1352 
  1346 
  1353   guarantee(Self == _owner, "complete_exit not owner");
  1347   guarantee(Self == _owner, "complete_exit not owner");
  1354   intptr_t save = _recursions; // record the old recursion count
  1348   intptr_t save = _recursions; // record the old recursion count
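complete_exit() saves the recursion depth, releases the monitor completely, and returns the saved value so a matching reenter() can restore the nesting later. A minimal sketch of that save/release/restore shape, assuming a monitor with enter() and exit() primitives (all names here are placeholders):

    #include <cstdint>

    // Placeholder monitor illustrating the complete_exit / reenter pairing.
    struct SketchMonitor {
      std::intptr_t _recursions = 0;
      void enter() { /* block until ownership is reacquired (elided) */ }
      void exit()  { /* release ownership entirely (elided) */ }

      std::intptr_t complete_exit() {
        std::intptr_t saved = _recursions;   // remember the nesting depth
        _recursions = 0;                     // flatten before releasing
        exit();
        return saved;
      }
      void reenter(std::intptr_t saved) {
        enter();
        _recursions = saved;                 // restore the original depth
      }
    };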
  1383   do {                                                                      \
  1377   do {                                                                      \
  1384     if (THREAD != _owner) {                                                 \
  1378     if (THREAD != _owner) {                                                 \
  1385       if (THREAD->is_lock_owned((address) _owner)) {                        \
  1379       if (THREAD->is_lock_owned((address) _owner)) {                        \
  1386         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
  1380         _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
  1387         _recursions = 0;                                                    \
  1381         _recursions = 0;                                                    \
  1388         OwnerIsThread = 1;                                                  \
       
  1389       } else {                                                              \
  1382       } else {                                                              \
  1390         TEVENT(Throw IMSX);                                                 \
  1383         TEVENT(Throw IMSX);                                                 \
  1391         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
  1384         THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
  1392       }                                                                     \
  1385       }                                                                     \
  1393     }                                                                       \
  1386     }                                                                       \
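The macro above guards the wait/notify entry points: it promotes a stack-lock owner to a thread owner when it can, and otherwise throws IllegalMonitorStateException. Stripped of the BasicLock conversion and HotSpot's THROW machinery, the contract reduces to a plain ownership check; a hedged, simplified rendering (names invented):

    #include <stdexcept>

    // Simplified stand-in for the ownership guard: callers of wait()/notify()
    // must already own the monitor, otherwise the call is illegal.
    inline void check_owner(const void* current_thread, const void* owner) {
      if (current_thread != owner) {
        throw std::runtime_error("IllegalMonitorStateException (sketch)");
      }
    }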
  1904 // algorithm.  On high order SMP systems it would be better to start with
  1897 // algorithm.  On high order SMP systems it would be better to start with
  1905 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
  1898 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
  1906 // a contending thread could enqueue itself on the cxq and then spin locally
  1899 // a contending thread could enqueue itself on the cxq and then spin locally
  1907 // on a thread-specific variable such as its ParkEvent._Event flag.
  1900 // on a thread-specific variable such as its ParkEvent._Event flag.
  1908 // That's left as an exercise for the reader.  Note that global spinning is
  1901 // That's left as an exercise for the reader.  Note that global spinning is
  1909 // not problematic on Niagara, as the L2$ serves the interconnect and has both
  1902 // not problematic on Niagara, as the L2 cache serves the interconnect and
  1910 // low latency and massive bandwidth.
  1903 // has both low latency and massive bandwidth.
  1911 //
  1904 //
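The MCS/CLH idea mentioned above is that each contender spins on a flag only it reads, so the spin traffic stays in one cache instead of hammering a shared word across the interconnect. A hedged sketch of that per-thread handoff (Waiter and its fields are invented; this is not the VM's ParkEvent):

    #include <atomic>

    // Each waiter spins on its own flag; the releaser sets exactly one flag,
    // so the spinning never ping-pongs a shared cache line (CLH/MCS flavor).
    struct Waiter {
      std::atomic<bool> granted{false};
      Waiter* next = nullptr;
    };

    void spin_until_granted(Waiter* self) {
      while (!self->granted.load(std::memory_order_acquire)) {
        // spin locally; a real implementation would pause or yield here
      }
    }

    void hand_off(Waiter* successor) {
      if (successor != nullptr) {
        successor->granted.store(true, std::memory_order_release);
      }
    }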
  1912 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
  1905 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
  1913 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
  1906 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
  1914 // (duration) or we can fix the count at approximately the duration of
  1907 // (duration) or we can fix the count at approximately the duration of
  1915 // a context switch and vary the frequency.   Of course we could also
  1908 // a context switch and vary the frequency.   Of course we could also
  2206 // observed by NotRunnable() might be garbage.  NotRunnable must
  2199 // observed by NotRunnable() might be garbage.  NotRunnable must
  2207 // tolerate this and consider the observed _thread_state value
  2200 // tolerate this and consider the observed _thread_state value
  2208 // as advisory.
  2201 // as advisory.
  2209 //
  2202 //
  2210 // Beware too, that _owner is sometimes a BasicLock address and sometimes
  2203 // Beware too, that _owner is sometimes a BasicLock address and sometimes
  2211 // a thread pointer.  We differentiate the two cases with OwnerIsThread.
  2204 // a thread pointer.
  2212 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
  2205 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
   2213 // with the LSB of _owner.  Another option would be to probabilistically probe
   2206 // with the LSB of _owner.  Another option would be to probabilistically probe
  2214 // the putative _owner->TypeTag value.
  2207 // the putative _owner->TypeTag value.
  2215 //
  2208 //
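One alternative floated above is to tag the low bit of _owner so a reader can tell a thread pointer from a BasicLock address, which works because both are at least word-aligned. A hedged sketch of that tagging idiom (purely illustrative; this changeset does not adopt it):

    #include <cstdint>

    // Low-bit tagging: aligned pointers leave bit 0 free for a type flag.
    inline void* tag_as_stack_lock(void* basic_lock) {
      return reinterpret_cast<void*>(
          reinterpret_cast<std::uintptr_t>(basic_lock) | 1u);
    }
    inline bool is_stack_lock(void* owner) {
      return (reinterpret_cast<std::uintptr_t>(owner) & 1u) != 0;
    }
    inline void* strip_tag(void* owner) {
      return reinterpret_cast<void*>(
          reinterpret_cast<std::uintptr_t>(owner) & ~static_cast<std::uintptr_t>(1));
    }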
  2216 // Checking _thread_state isn't perfect.  Even if the thread is
  2209 // Checking _thread_state isn't perfect.  Even if the thread is
  2228 // The caller must tolerate false-negative and false-positive errors.
  2221 // The caller must tolerate false-negative and false-positive errors.
  2229 // Spinning, in general, is probabilistic anyway.
  2222 // Spinning, in general, is probabilistic anyway.
  2230 
  2223 
  2231 
  2224 
  2232 int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
  2225 int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
  2233   // Check either OwnerIsThread or ox->TypeTag == 2BAD.
  2226   // Check ox->TypeTag == 2BAD.
  2234   if (!OwnerIsThread) return 0;
       
  2235 
       
  2236   if (ox == NULL) return 0;
  2227   if (ox == NULL) return 0;
  2237 
  2228 
  2238   // Avoid transitive spinning ...
  2229   // Avoid transitive spinning ...
  2239   // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
  2230   // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
  2240   // Immediately after T1 acquires L it's possible that T2, also
  2231   // Immediately after T1 acquires L it's possible that T2, also
  2396     NEWPERFVARIABLE(_sync_MonExtant);
  2387     NEWPERFVARIABLE(_sync_MonExtant);
  2397 #undef NEWPERFCOUNTER
  2388 #undef NEWPERFCOUNTER
  2398 #undef NEWPERFVARIABLE
  2389 #undef NEWPERFVARIABLE
  2399   }
  2390   }
  2400 }
  2391 }
  2401 
       
  2402 
       
  2403 // Compile-time asserts
       
  2404 // When possible, it's better to catch errors deterministically at
       
  2405 // compile-time than at runtime.  The down-side to using compile-time
       
   2406 // asserts is that the error message -- often something about negative array
       
  2407 // indices -- is opaque.
       
  2408 
       
  2409 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
       
  2410 
       
  2411 void ObjectMonitor::ctAsserts() {
       
  2412   CTASSERT(offset_of (ObjectMonitor, _header) == 0);
       
  2413 }
       
  2414 
       
  2415 
  2392 
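The deleted CTASSERT macro forced a compile-time failure by declaring a negative-sized array, at the cost of an opaque diagnostic. With C++11 the same layout check can be written with static_assert and a readable message; a hedged equivalent over a stand-in struct (ExampleMonitor is not the real ObjectMonitor):

    #include <cstddef>

    struct ExampleMonitor {   // stand-in layout for illustration only
      void* _header;
      void* _owner;
    };

    // Same intent as CTASSERT(offset_of(ObjectMonitor, _header) == 0),
    // but the failure message names the problem instead of a bad array size.
    static_assert(offsetof(ExampleMonitor, _header) == 0,
                  "_header must be the first field of the monitor");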
  2416 static char * kvGet(char * kvList, const char * Key) {
  2393 static char * kvGet(char * kvList, const char * Key) {
  2417   if (kvList == NULL) return NULL;
  2394   if (kvList == NULL) return NULL;
  2418   size_t n = strlen(Key);
  2395   size_t n = strlen(Key);
  2419   char * Search;
  2396   char * Search;
  2524   bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
  2501   bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
  2525 
  2502 
  2526   if (verbose) {
  2503   if (verbose) {
  2527     tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
  2504     tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
  2528                   sizeof(ObjectMonitor));
  2505                   sizeof(ObjectMonitor));
       
  2506     tty->print_cr("INFO: sizeof(PaddedEnd<ObjectMonitor>)=" SIZE_FORMAT,
       
  2507                   sizeof(PaddedEnd<ObjectMonitor>));
  2529   }
  2508   }
  2530 
  2509 
  2531   uint cache_line_size = VM_Version::L1_data_cache_line_size();
  2510   uint cache_line_size = VM_Version::L1_data_cache_line_size();
  2532   if (verbose) {
  2511   if (verbose) {
  2533     tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
  2512     tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
  2557       tty->print_cr("WARNING: the _header and _owner fields are closer "
  2536       tty->print_cr("WARNING: the _header and _owner fields are closer "
  2558                     "than a cache line which permits false sharing.");
  2537                     "than a cache line which permits false sharing.");
  2559       warning_cnt++;
  2538       warning_cnt++;
  2560     }
  2539     }
  2561 
  2540 
  2562     if ((sizeof(ObjectMonitor) % cache_line_size) != 0) {
  2541     if ((sizeof(PaddedEnd<ObjectMonitor>) % cache_line_size) != 0) {
  2563       tty->print_cr("WARNING: ObjectMonitor size is not a multiple of "
  2542       tty->print_cr("WARNING: PaddedEnd<ObjectMonitor> size is not a "
  2564                     "a cache line which permits false sharing.");
  2543                     "multiple of a cache line which permits false sharing.");
  2565       warning_cnt++;
  2544       warning_cnt++;
  2566     }
  2545     }
  2567   }
  2546   }
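The new warning checks sizeof(PaddedEnd<ObjectMonitor>) rather than the raw ObjectMonitor size because monitors are allocated in blocks: if the padded element were not a whole number of cache lines, the tail of one monitor would share a line with the head of the next and reintroduce false sharing. A hedged sketch of the padding idea with an assumed 64-byte line (HotSpot queries VM_Version::L1_data_cache_line_size() at runtime instead):

    #include <cstddef>

    // Illustrative padding wrapper: rounds each array element up to a
    // multiple of an assumed 64-byte cache line to avoid false sharing
    // between neighboring elements.
    template <typename T, std::size_t LINE = 64>
    struct PaddedToLine {
      T value;
      // Pad to the next line boundary (a full extra line if T already
      // ends exactly on one -- simple, if slightly wasteful).
      char pad[LINE - (sizeof(T) % LINE)];
    };

    static_assert(sizeof(PaddedToLine<int, 64>) % 64 == 0,
                  "padded element must span whole cache lines");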
  2568 
  2547 
  2569   ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
  2548   ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,