hotspot/src/share/vm/runtime/objectMonitor.cpp
changeset 37092 0e56e3c9d545
parent 36384 b0b41336a9a8
child 37251 9fc139ad74b5
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	37074:20b25dd44cb8
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	37092:0e56e3c9d545
@@ -42,18 +42,10 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
 
-#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
-// Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define NOINLINE __attribute__((noinline))
-#else
-  #define NOINLINE
-#endif
-
-
 #ifdef DTRACE_ENABLED
 
 // Only bother with this argument setup if dtrace is available
 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
 
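The block deleted above was a file-local workaround: a NOINLINE macro that expands to GCC's noinline attribute so selected functions stay out of line on builds where older GCC would otherwise fail. As a minimal, self-contained sketch of that conditional-attribute pattern (illustrative only; slow_path is a made-up function, and this is not whatever shared replacement the rest of the changeset provides):

    #include <cstdio>

    // Expand to GCC's noinline attribute when the compiler supports it,
    // and to nothing otherwise, so the annotation is a no-op elsewhere.
    #if defined(__GNUC__)
      #define NOINLINE __attribute__((noinline))
    #else
      #define NOINLINE
    #endif

    // Hypothetical cold path kept out of line so the hot caller stays small.
    static void NOINLINE slow_path(int reason) {
      std::printf("slow path taken: %d\n", reason);
    }

    int main() {
      slow_path(42);
      return 0;
    }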
@@ -252,11 +244,11 @@
 
 
 // -----------------------------------------------------------------------------
 // Enter support
 
-void NOINLINE ObjectMonitor::enter(TRAPS) {
+void ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
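The fast path in this hunk tries to claim the monitor with a single compare-and-swap on _owner (NULL to Self). A rough standalone sketch of that shape using std::atomic instead of HotSpot's Atomic::cmpxchg_ptr (SimpleMonitor, try_enter, and the Thread stand-in are illustrative names):

    #include <atomic>
    #include <cassert>

    struct Thread;  // stand-in for the acquiring thread's identity

    // Same fast-path shape as enter(): one CAS that swings _owner from
    // nullptr to the calling thread; on failure a real monitor would fall
    // through to a contended slow path such as EnterI().
    struct SimpleMonitor {
      std::atomic<Thread*> _owner{nullptr};

      bool try_enter(Thread* self) {
        Thread* expected = nullptr;
        return _owner.compare_exchange_strong(expected, self,
                                              std::memory_order_acquire,
                                              std::memory_order_relaxed);
      }

      void exit(Thread* self) {
        assert(_owner.load(std::memory_order_relaxed) == self && "not the owner");
        _owner.store(nullptr, std::memory_order_release);
      }
    };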
@@ -429,11 +421,11 @@
   return -1;
 }
 
 #define MAX_RECHECK_INTERVAL 1000
 
-void NOINLINE ObjectMonitor::EnterI(TRAPS) {
+void ObjectMonitor::EnterI(TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 
   // Try the lock - TATAS
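"TATAS" in the comment above is test-and-test-and-set: spin on ordinary loads until the lock word looks free, and only then issue the atomic operation, which limits the failed-CAS cache-line upgrades mentioned in the enter() comment earlier. A hedged sketch of the pattern (TatasLock is an illustrative name, not HotSpot code):

    #include <atomic>

    // Test-and-test-and-set: spin on plain loads (which keep the cache line
    // in a shared state) and attempt the atomic exchange only when the lock
    // looks free, limiting failed read-for-ownership upgrades.
    struct TatasLock {
      std::atomic<int> _locked{0};

      void lock() {
        for (;;) {
          // "Test": wait until the lock appears free.
          while (_locked.load(std::memory_order_relaxed) != 0) {
            // busy-wait; production code would pause, yield, or park here
          }
          // "Test-and-set": one atomic attempt; loop again if we lost the race.
          if (_locked.exchange(1, std::memory_order_acquire) == 0) {
            return;
          }
        }
      }

      void unlock() {
        _locked.store(0, std::memory_order_release);
      }
    };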
@@ -679,11 +671,11 @@
 //
 // In the future we should reconcile EnterI() and ReenterI(), adding
 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 // loop accordingly.
 
-void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
+void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
   assert(Self != NULL, "invariant");
   assert(SelfNode != NULL, "invariant");
   assert(SelfNode->_thread == Self, "invariant");
   assert(_waiters > 0, "invariant");
   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
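The last assert in this hunk checks that the object's mark word still encodes a pointer to this inflated monitor. As a simplified model of that kind of encoding (a pointer carrying low tag bits; the constants and helpers here are illustrative and not HotSpot's actual markOop layout):

    #include <cassert>
    #include <cstdint>

    struct ObjectMonitorStub;  // stand-in for the native monitor object

    // Simplified model: the object's header word stores the monitor pointer
    // with a low tag marking the "has an inflated monitor" state.
    constexpr std::uintptr_t kMonitorTag = 0x2;
    constexpr std::uintptr_t kTagMask    = 0x3;

    inline std::uintptr_t encode(ObjectMonitorStub* mon) {
      // Monitors are aligned, so the low bits are free to carry the tag.
      assert((reinterpret_cast<std::uintptr_t>(mon) & kTagMask) == 0);
      return reinterpret_cast<std::uintptr_t>(mon) | kMonitorTag;
    }

    inline ObjectMonitorStub* decode(std::uintptr_t mark) {
      assert((mark & kTagMask) == kMonitorTag);
      return reinterpret_cast<ObjectMonitorStub*>(mark & ~kTagMask);
    }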
@@ -892,11 +884,11 @@
 // another thread can reacquire the lock immediately, and we can
 // then wake a thread unnecessarily. This is benign, and we've
 // structured the code so the windows are short and the frequency
 // of such futile wakups is low.
 
-void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
+void ObjectMonitor::exit(bool not_suspended, TRAPS) {
   Thread * const Self = THREAD;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
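The comment at the top of this hunk explains why wakeups can be futile: ownership is released before a successor is signalled, so a barging thread may reacquire the monitor first and the woken waiter simply re-contends. A much-reduced sketch of that release-then-notify ordering (ExitSketch and its members are illustrative, not HotSpot's ParkEvent machinery):

    #include <atomic>
    #include <cassert>
    #include <condition_variable>
    #include <mutex>

    // Release-then-notify ordering: the lock is dropped before a waiter is
    // signalled, so another thread can CAS itself into _owner in between
    // and the resulting wakeup is futile but benign.
    struct ExitSketch {
      std::atomic<void*>      _owner{nullptr};
      std::mutex              _m;
      std::condition_variable _cv;

      void exit(void* self) {
        assert(_owner.load(std::memory_order_relaxed) == self);
        _owner.store(nullptr, std::memory_order_release);  // 1. drop the lock
        std::lock_guard<std::mutex> g(_m);
        _cv.notify_one();                                   // 2. wake one waiter
      }
    };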