hotspot/src/share/vm/runtime/synchronizer.cpp
changeset 6975 dc9b63952682
parent 5920 8fdbb85e62d3
child 7397 5b173b4ca846
comparison of 6971:11c11e616b91 with 6975:dc9b63952682; hunks below are marked [equal], [deleted], and [inserted]
[equal]
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif

[deleted]
// Native markword accessors for synchronization and hashCode().
//

[equal]
// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//

[deleted]
// TODO: merge the objectMonitor and synchronizer classes.

[equal]
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

[equal]
HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  jlong, uintptr_t, char*, int);

[deleted]
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  jlong, uintptr_t, char*, int);
[equal]
#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \

[... equal lines elided ...]

#define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

[deleted]
// ObjectWaiter serves as a "proxy" or surrogate thread.
// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
// ParkEvent instead.  Beware, however, that the JVMTI code
// knows about ObjectWaiters, so we'll have to reconcile that code.
// See next_waiter(), first_waiter(), etc.

class ObjectWaiter : public StackObj {
 public:
  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
  enum Sorted  { PREPEND, APPEND, SORTED } ;
  ObjectWaiter * volatile _next;
  ObjectWaiter * volatile _prev;
  Thread*       _thread;
  ParkEvent *   _event;
  volatile int  _notified ;
  volatile TStates TState ;
  Sorted        _Sorted ;           // List placement disposition
  bool          _active ;           // Contention monitoring is enabled
 public:
  ObjectWaiter(Thread* thread) {
    _next     = NULL;
    _prev     = NULL;
    _notified = 0;
    TState    = TS_RUN ;
    _thread   = thread;
    _event    = thread->_ParkEvent ;
    _active   = false;
    assert (_event != NULL, "invariant") ;
  }

  void wait_reenter_begin(ObjectMonitor *mon) {
    JavaThread *jt = (JavaThread *)this->_thread;
    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
  }

  void wait_reenter_end(ObjectMonitor *mon) {
    JavaThread *jt = (JavaThread *)this->_thread;
    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
  }
};

enum ManifestConstants {
    ClearResponsibleAtSTW   = 0,
    MaximumRecheckInterval  = 1000
} ;

#undef TEVENT
#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }

#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}

#undef  TEVENT
#define TEVENT(nom) {;}

[inserted]
// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
#define CHAINMARKER ((oop)-1)

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter.  The interpreter and compilers use
// assembly copies of this code.  Make sure to update that code if this
// function is changed.  The implementation is extremely sensitive to
// race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
 if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 }

 slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is NULL, the previous enter was a recursive
  // enter and this exit is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}
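The displaced-header protocol that slow_enter() and fast_exit() implement can be modeled outside the VM. Below is a minimal standalone sketch, assuming std::atomic in place of markOop and Atomic::cmpxchg_ptr; ToyBasicLock, try_stack_lock, and stack_unlock are illustrative names, not HotSpot APIs.

#include <atomic>
#include <cstdint>

struct ToyBasicLock {
  uintptr_t displaced;              // saved (neutral) mark word
};

// Low two bits 01 => neutral/unlocked; 00 => stack-locked, i.e. the mark
// is a pointer to the owner's ToyBasicLock, mirroring markOop conventions.
constexpr uintptr_t NEUTRAL = 0x1;

bool try_stack_lock(std::atomic<uintptr_t>& mark, ToyBasicLock* box) {
  uintptr_t m = mark.load(std::memory_order_relaxed);
  if ((m & 0x3) != NEUTRAL) return false;   // not neutral: locked or inflated
  box->displaced = m;                       // ST the displaced header first...
  uintptr_t expected = m;                   // ...then publish the box via CAS
  return mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(box));
}

bool stack_unlock(std::atomic<uintptr_t>& mark, ToyBasicLock* box) {
  // Swing the displaced header from the box back into the mark word;
  // failure means the lock was inflated in the interim.
  uintptr_t expected = reinterpret_cast<uintptr_t>(box);
  return mark.compare_exchange_strong(expected, box->displaced);
}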
       
   208 
       
   209 // This routine is used to handle interpreter/compiler slow case
       
   210 // We don't need to use fast path here, because it must have
       
   211 // failed in the interpreter/compiler code. Simply use the heavy
       
   212 // weight monitor should be ok, unless someone find otherwise.
       
   213 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
       
   214   fast_exit (object, lock, THREAD) ;
       
   215 }
       
   216 
       
   217 // -----------------------------------------------------------------------------
       
   218 // Class Loader  support to workaround deadlocks on the class loader lock objects
       
   219 // Also used by GC
       
   220 // complete_exit()/reenter() are used to wait on a nested lock
       
   221 // i.e. to give up an outer lock completely and then re-enter
       
   222 // Used when holding nested locks - lock acquisition order: lock1 then lock2
       
   223 //  1) complete_exit lock1 - saving recursion count
       
   224 //  2) wait on lock2
       
   225 //  3) when notified on lock2, unlock lock2
       
   226 //  4) reenter lock1 with original recursion count
       
   227 //  5) lock lock2
       
   228 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
       
   229 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
       
   230   TEVENT (complete_exit) ;
       
   231   if (UseBiasedLocking) {
       
   232     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
   233     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
   234   }
       
   235 
       
   236   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
       
   237 
       
   238   return monitor->complete_exit(THREAD);
       
   239 }
       
   240 
       
   241 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
       
   242 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
       
   243   TEVENT (reenter) ;
       
   244   if (UseBiasedLocking) {
       
   245     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
   246     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
   247   }
       
   248 
       
   249   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
       
   250 
       
   251   monitor->reenter(recursion, THREAD);
       
   252 }
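The five-step nested-lock pattern described above can be illustrated with a toy recursive lock. This is a sketch only, assuming a simplified wait step; RecLock and nested_wait_example are illustrative stand-ins, not the VM's ObjectMonitor API.

#include <mutex>

struct RecLock {
  std::recursive_mutex m;
  int depth = 0;                         // mutated only while m is held
  void lock()   { m.lock(); ++depth; }
  void unlock() { --depth; m.unlock(); }
  // Give up the lock entirely, however deeply it is held, and report
  // the recursion count so it can be restored later.
  int complete_exit() {
    int saved = depth;
    for (int i = 0; i < saved; i++) unlock();
    return saved;
  }
  void reenter(int saved) {
    for (int i = 0; i < saved; i++) lock();
  }
};

void nested_wait_example(RecLock& lock1, RecLock& lock2) {
  lock1.lock(); lock1.lock();            // lock1 held recursively (depth 2)
  int saved = lock1.complete_exit();     // 1) drop lock1, saving the count
  lock2.lock();                          // 2) "wait" on lock2 (simplified)
  lock2.unlock();                        // 3) release lock2 when notified
  lock1.reenter(saved);                  // 4) restore lock1 at depth 2
  lock2.lock();                          // 5) then take lock2
  lock2.unlock();
  lock1.unlock(); lock1.unlock();
}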
       
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
     monitor->exit(THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741.  Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}
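One way to read the has_locker() fast path in notify()/notifyall() above: wait() always inflates the lock, so if the mark is still a stack lock owned by the calling thread there cannot be any waiters, and the notification can return without inflating. A tiny standalone model of that check follows; ToyMarkWord, MarkKind, and notify_fast_path are illustrative names, not VM APIs.

enum class MarkKind { Neutral, StackLocked, Inflated };

struct ToyMarkWord {
  MarkKind kind;
  void*    owner_box;     // valid only when kind == StackLocked
};

// Returns true when notify can be a no-op: the caller owns the lock via a
// stack lock, so no thread can ever have waited on this object.
bool notify_fast_path(const ToyMarkWord& mark, bool owns_box(void*)) {
  return mark.kind == MarkKind::StackLocked && owns_box(mark.owner_box);
}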
       

// -----------------------------------------------------------------------------
// Hash Code handling
//
[equal]
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()

[... equal lines elided ...]

} ;

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
[deleted]

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
static int Knob_HandOff            = 0 ;
static int Knob_Verbose            = 0 ;
static int Knob_ReportSettings     = 0 ;

static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
static int Knob_SpinEarly          = 1 ;
static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
static int Knob_Bonus              = 100 ;     // spin success bonus
static int Knob_BonusB             = 100 ;     // spin success bonus
static int Knob_Penalty            = 200 ;     // spin failure penalty
static int Knob_Poverty            = 1000 ;
static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
static int Knob_FixedSpin          = 0 ;
static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
static int Knob_UsePause           = 1 ;
static int Knob_ExitPolicy         = 0 ;
static int Knob_PreSpin            = 10 ;      // 20-100 likely better
static int Knob_ResetEvent         = 0 ;
static int BackOffMask             = 0 ;

static int Knob_FastHSSEC          = 0 ;
static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
static volatile int InitDone       = 0 ;


// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same value.
     // On MP systems we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = intptr_t(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = intptr_t(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
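For reference, the Marsaglia xor-shift path above (the default branch) extracted as a self-contained generator; the struct name and the seed values (the constants from Marsaglia's xorshift paper) are illustrative, since the VM seeds the per-thread state elsewhere.

#include <cstdint>

struct XorShift128 {
  // Per-thread state; any nonzero seed works.
  uint32_t x = 123456789, y = 362436069, z = 521288629, w = 88675123;
  uint32_t next() {
    uint32_t t = x ^ (x << 11);            // same recurrence as above
    x = y; y = z; z = w;
    w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
    return w;
  }
};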
       
void BasicLock::print_on(outputStream* st) const {
  st->print("monitor");
}

void BasicLock::move_to(oop obj, BasicLock* dest) {
  // Check to see if we need to inflate the lock. This is only needed
  // if an object is locked using "this" lightweight monitor. In that
  // case, the displaced_header() is unlocked, because the
  // displaced_header() contains the header for the originally unlocked
  // object. However the object could have already been inflated. But it
  // does not matter: the inflation will just be a no-op. For other cases,
  // the displaced header will be either 0x0 or 0x3, which are location
  // independent, therefore the BasicLock is free to move.
  //
  // During OSR we may need to relocate a BasicLock (which contains a
  // displaced word) from a location in an interpreter frame to a
  // new location in a compiled frame.  "this" refers to the source
  // basiclock in the interpreter frame.  "dest" refers to the destination
  // basiclock in the new compiled frame.  We *always* inflate in move_to().
  // The always-inflate policy works properly, but in 1.5.0 it can sometimes
  // cause performance problems in code that makes heavy use of a small # of
  // uncontended locks.   (We'd inflate during OSR, and then sync performance
  // would subsequently plummet because the thread would be forced thru the slow-path).
  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
  // operations in Fast_Lock and Fast_Unlock in i486.ad.
  //
  // Note that there is a way to safely swing the object's markword from
  // one stack location to another.  This avoids inflation.  Obviously,
  // we need to ensure that both locations refer to the current thread's stack.
  // There are some subtle concurrency issues, however, and since the benefit is
  // small (given the support for inflated fast-path locking in fast_lock, etc)
  // we'll leave that optimization for another time.

  if (displaced_header()->is_neutral()) {
    ObjectSynchronizer::inflate_helper(obj);
    // WARNING: We can not put a check here, because the inflation
    // will not update the displaced header. Once a BasicLock is inflated,
    // no one should ever look at its content.
  } else {
    // Typically the displaced header will be 0 (recursive stack lock) or
    // unused_mark.  Naively we'd like to assert that the displaced mark
    // value is either 0, neutral, or 3.  But with the advent of the
    // store-before-CAS avoidance in fast_lock/compiler_lock_object
    // we can find any flavor mark in the displaced mark.
  }
// [RGV] The next line appears to do nothing!
  intptr_t dh = (intptr_t) displaced_header();
  dest->set_displaced_header(displaced_header());
}

// -----------------------------------------------------------------------------

// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}

// -----------------------------------------------------------------------------


PerfCounter * ObjectSynchronizer::_sync_Inflations                  = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Deflations                  = NULL ;
PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts       = NULL ;
PerfCounter * ObjectSynchronizer::_sync_FutileWakeups               = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Parks                       = NULL ;
PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications          = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Notifications               = NULL ;
PerfCounter * ObjectSynchronizer::_sync_PrivateA                    = NULL ;
PerfCounter * ObjectSynchronizer::_sync_PrivateB                    = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowExit                    = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowEnter                   = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowNotify                  = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ;
PerfCounter * ObjectSynchronizer::_sync_FailedSpins                 = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins             = NULL ;
PerfCounter * ObjectSynchronizer::_sync_MonInCirculation            = NULL ;
PerfCounter * ObjectSynchronizer::_sync_MonScavenged                = NULL ;
PerfLongVariable * ObjectSynchronizer::_sync_MonExtant              = NULL ;

// One-shot global initialization for the sync subsystem.
// We could also defer initialization and initialize on-demand
// the first time we call inflate().  Initialization would
// be protected - like so many things - by the MonitorCache_lock.

void ObjectSynchronizer::Initialize () {
  static int InitializationCompleted = 0 ;
  assert (InitializationCompleted == 0, "invariant") ;
  InitializationCompleted = 1 ;
  if (UsePerfData) {
      EXCEPTION_MARK ;
      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
      NEWPERFCOUNTER(_sync_Inflations) ;
      NEWPERFCOUNTER(_sync_Deflations) ;
      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
      NEWPERFCOUNTER(_sync_FutileWakeups) ;
      NEWPERFCOUNTER(_sync_Parks) ;
      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
      NEWPERFCOUNTER(_sync_Notifications) ;
      NEWPERFCOUNTER(_sync_SlowEnter) ;
      NEWPERFCOUNTER(_sync_SlowExit) ;
      NEWPERFCOUNTER(_sync_SlowNotify) ;
      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
      NEWPERFCOUNTER(_sync_FailedSpins) ;
      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
      NEWPERFCOUNTER(_sync_PrivateA) ;
      NEWPERFCOUNTER(_sync_PrivateB) ;
      NEWPERFCOUNTER(_sync_MonInCirculation) ;
      NEWPERFCOUNTER(_sync_MonScavenged) ;
      NEWPERFVARIABLE(_sync_MonExtant) ;
      #undef NEWPERFCOUNTER
  }
}

// Compile-time asserts
// When possible, it's better to catch errors deterministically at
// compile-time than at runtime.  The down-side to using compile-time
// asserts is that the error message -- often something about negative
// array indices -- is opaque.

#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }

void ObjectMonitor::ctAsserts() {
  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
}

static int Adjust (volatile int * adr, int dx) {
  int v ;
  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  return v ;
}

// Ad-hoc mutual exclusion primitives: SpinLock and Mux
//
// We employ SpinLocks _only for low-contention, fixed-length
// short-duration critical sections where we're concerned
// about native mutex_t or HotSpot Mutex:: latency.
// The mux construct provides a spin-then-block mutual exclusion
// mechanism.
//
// Testing has shown that contention on the ListLock guarding gFreeList
// is common.  If we implement ListLock as a simple SpinLock it's common
// for the JVM to devolve to yielding with little progress.  This is true
// despite the fact that the critical sections protected by ListLock are
// extremely short.
//
// TODO-FIXME: ListLock should be of type SpinLock.
// We should make this a 1st-class type, integrated into the lock
// hierarchy as leaf-locks.  Critically, the SpinLock structure
// should have sufficient padding to avoid false-sharing and excessive
// cache-coherency traffic.


typedef volatile int SpinLockT ;

void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
  if (Atomic::cmpxchg (1, adr, 0) == 0) {
     return ;   // normal fast-path return
  }

  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  TEVENT (SpinAcquire - ctx) ;
  int ctr = 0 ;
  int Yields = 0 ;
  for (;;) {
     while (*adr != 0) {
        ++ctr ;
        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
           if (Yields > 5) {
             // Consider using a simple NakedSleep() instead.
             // Then SpinAcquire could be called by non-JVM threads
             Thread::current()->_ParkEvent->park(1) ;
           } else {
             os::NakedYield() ;
             ++Yields ;
           }
        } else {
           SpinPause() ;
        }
     }
     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
  }
}

void Thread::SpinRelease (volatile int * adr) {
  assert (*adr != 0, "invariant") ;
  OrderAccess::fence() ;      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  *adr = 0 ;
}
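The same Spin/Yield/Block strategy can be expressed with portable primitives. A minimal sketch, assuming std::atomic and std::this_thread stand in for Atomic::cmpxchg, os::NakedYield(), and ParkEvent::park(1); the 0xFFF recheck mask mirrors the code above, and the MP check is omitted.

#include <atomic>
#include <chrono>
#include <thread>

inline void spin_acquire(std::atomic<int>& lock) {
  int zero = 0;
  if (lock.compare_exchange_strong(zero, 1)) return;    // fast path
  int ctr = 0, yields = 0;
  for (;;) {
    while (lock.load(std::memory_order_relaxed) != 0) { // test before test-and-set
      if ((++ctr & 0xFFF) == 0) {
        if (yields > 5) {
          // Stand-in for park(1): sleep briefly instead of burning CPU.
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } else {
          std::this_thread::yield();
          ++yields;
        }
      }
    }
    zero = 0;
    if (lock.compare_exchange_strong(zero, 1)) return;
  }
}

inline void spin_release(std::atomic<int>& lock) {
  // Release store: prior critical-section accesses may not float past it.
  lock.store(0, std::memory_order_release);
}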
       
// muxAcquire and muxRelease:
//
// *  muxAcquire and muxRelease support a single-word lock-word construct.
//    The LSB of the word is set IFF the lock is held.
//    The remainder of the word points to the head of a singly-linked list
//    of threads blocked on the lock.
//
// *  The current implementation of muxAcquire-muxRelease uses its own
//    dedicated Thread._MuxEvent instance.  If we're interested in
//    minimizing the peak number of extant ParkEvent instances then
//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
//    as certain invariants were satisfied.  Specifically, care would need
//    to be taken with regards to consuming unpark() "permits".
//    A safe rule of thumb is that a thread would never call muxAcquire()
//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
//    consume an unpark() permit intended for monitorenter, for instance.
//    One way around this would be to widen the restricted-range semaphore
//    implemented in park().  Another alternative would be to provide
//    multiple instances of the PlatformEvent() for each thread.  One
//    instance would be dedicated to muxAcquire-muxRelease, for instance.
//
// *  Usage:
//    -- Only as leaf locks
//    -- for short-term locking only as muxAcquire does not perform
//       thread state transitions.
//
// Alternatives:
// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
//    but with parking or spin-then-park instead of pure spinning.
// *  Use Taura-Oyama-Yonenzawa locks.
// *  It's possible to construct a 1-0 lock if we encode the lockword as
//    (List,LockByte).  Acquire will CAS the full lockword while Release
//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
//    acquiring threads use timers (ParkTimed) to detect and recover from
//    the stranding window.  Thread/Node structures must be aligned on 256-byte
//    boundaries by using placement-new.
// *  Augment MCS with advisory back-link fields maintained with CAS().
//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
//    The validity of the backlinks must be ratified before we trust the value.
//    If the backlinks are invalid the exiting thread must back-track through
//    the forward links, which are always trustworthy.
// *  Add a successor indication.  The LockWord is currently encoded as
//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
//    to provide the usual futile-wakeup optimization.
//    See RTStt for details.
// *  Consider schedctl.sc_nopreempt to cover the critical section.
//


typedef volatile intptr_t MutexT ;      // Mux Lock-word
enum MuxBits { LOCKBIT = 1 } ;

void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
     return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * const Self = Thread::current()->_MuxEvent ;
  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
     int its = (os::is_MP() ? 100 : 0) + 1 ;

     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock ;
       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          return ;
       }
     }

     Self->reset() ;
     Self->OnList = intptr_t(Lock) ;
     // The following fence() isn't strictly necessary as the subsequent
     // CAS() both serializes execution and ratifies the fetched *Lock value.
     OrderAccess::fence();
     for (;;) {
        w = *Lock ;
        if ((w & LOCKBIT) == 0) {
            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
                Self->OnList = 0 ;   // hygiene - allows stronger asserts
                return ;
            }
            continue ;      // Interference -- *Lock changed -- Just retry
        }
        assert (w & LOCKBIT, "invariant") ;
        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
     }

     while (Self->OnList != 0) {
        Self->park() ;
     }
  }
}

void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
    return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * ReleaseAfter = NULL ;
  if (ev == NULL) {
    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
  }
  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
    guarantee (ev->OnList == 0, "invariant") ;
    int its = (os::is_MP() ? 100 : 0) + 1 ;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
        if (ReleaseAfter != NULL) {
          ParkEvent::Release (ReleaseAfter) ;
        }
        return ;
      }
    }

    ev->reset() ;
    ev->OnList = intptr_t(Lock) ;
    // The following fence() isn't strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0) {
        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          ev->OnList = 0 ;
          // We call ::Release while holding the outer lock, thus
          // artificially lengthening the critical section.
          // Consider deferring the ::Release() until the subsequent unlock(),
          // after we've dropped the outer lock.
          if (ReleaseAfter != NULL) {
            ParkEvent::Release (ReleaseAfter) ;
          }
          return ;
        }
        continue ;      // Interference -- *Lock changed -- Just retry
      }
      assert (w & LOCKBIT, "invariant") ;
      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
    }

    while (ev->OnList != 0) {
      ev->park() ;
    }
  }
}

// Release() must extract a successor from the list and then wake that thread.
// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
// similar to that used by ParkEvent::Allocate() and ::Release().  A DMR-based
// Release() would:
// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
// (B) Extract a successor from the private list "in-hand".
// (C) Attempt to CAS() the residual back into *Lock over null.
//     If there were any newly arrived threads, the CAS() would fail.
//     In that case Release() would detach the RATs, re-merge the list in-hand
//     with the RATs and repeat as needed.  Alternately, Release() might
//     detach and extract a successor, but then pass the residual list to the wakee.
//     The wakee would be responsible for reattaching and remerging before it
//     competed for the lock.
//
// Both "pop" and DMR are immune from ABA corruption -- there can be
// multiple concurrent pushers, but only one popper or detacher.
// This implementation pops from the head of the list.  This is unfair,
// but tends to provide excellent throughput as hot threads remain hot.
// (We wake recently run threads first.)

void Thread::muxRelease (volatile intptr_t * Lock)  {
  for (;;) {
    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
    assert (w & LOCKBIT, "invariant") ;
    if (w == LOCKBIT) return ;
    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
    assert (List != NULL, "invariant") ;
    assert (List->OnList == intptr_t(Lock), "invariant") ;
    ParkEvent * nxt = List->ListNext ;

    // The following CAS() releases the lock and pops the head element.
    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
      continue ;
    }
    List->OnList = 0 ;
    OrderAccess::fence() ;
    List->unpark () ;
    return ;
  }
}
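The (List, LOCKBIT) encoding at the heart of the mux primitives can be shown in isolation. A minimal sketch, assuming std::atomic<intptr_t> in place of the VM's lock word; WaiterNode is an illustrative stand-in for ParkEvent, and nodes are assumed to be at least 2-byte aligned so the LSB is free for the lock bit.

#include <atomic>
#include <cstdint>

struct WaiterNode { WaiterNode* next = nullptr; };
constexpr intptr_t LOCKBIT_ = 1;

inline bool mux_try_lock(std::atomic<intptr_t>& word) {
  intptr_t w = word.load(std::memory_order_relaxed);
  // Set the lock bit while preserving the waiter-list pointer bits.
  return (w & LOCKBIT_) == 0 &&
         word.compare_exchange_strong(w, w | LOCKBIT_);
}

// Push self onto the waiter list while the lock remains held
// (the bit stays set); returns false if the lock was observed free,
// in which case the caller should retry acquisition instead.
inline bool mux_enqueue(std::atomic<intptr_t>& word, WaiterNode* self) {
  intptr_t w = word.load(std::memory_order_relaxed);
  if ((w & LOCKBIT_) == 0) return false;
  self->next = reinterpret_cast<WaiterNode*>(w & ~LOCKBIT_);
  return word.compare_exchange_strong(
      w, reinterpret_cast<intptr_t>(self) | LOCKBIT_);
}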
       

// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//
// TODO-FIXME:
//
// *  We currently protect the gFreeList with a simple lock.
//    An alternate lock-free scheme would be to pop elements from the gFreeList
//    with CAS.  This would be safe from ABA corruption as long as we only
//    recycled previously appearing elements onto the list in deflate_idle_monitors()
//    at STW-time.  Completely new elements could always be pushed onto the gFreeList
//    with CAS.  Elements that appeared previously on the list could only
//    be installed at STW-time.  (See the sketch after this comment block.)
//
// *  For efficiency and to help reduce the store-before-CAS penalty
//    the objectmonitors on gFreeList or local free lists should be ready to install
//    with the exception of _header and _object.  _object can be set after inflation.
//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
//    state with m.Owner set properly.
//
// *  We could also diffuse contention by using multiple global (FreeList, Lock)
//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
//    an unlocked free list.
//
// *  Add lifecycle tags and assert()s.
//
// *  Be more consistent about when we clear an objectmonitor's fields:
//    A.  After extracting the objectmonitor from a free list.
//    B.  After adding an objectmonitor to a free list.
//
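The push half of the lock-free scheme mentioned in the first TODO bullet is straightforward, since a CAS push is ABA-safe with any number of concurrent pushers. A minimal sketch, assuming std::atomic; ToyMonitor and g_free_list are illustrative names, not the VM's gFreeList machinery.

#include <atomic>

struct ToyMonitor { ToyMonitor* free_next = nullptr; };
static std::atomic<ToyMonitor*> g_free_list{nullptr};

inline void free_list_push(ToyMonitor* m) {
  ToyMonitor* head = g_free_list.load(std::memory_order_relaxed);
  do {
    m->free_next = head;   // link to the observed head, then publish via CAS
  } while (!g_free_list.compare_exchange_weak(head, m));
}

The pop side is the part that needs the STW-only recycling discipline described above: if previously seen elements can only reappear on the list while all mutators are stopped, a CAS pop cannot observe an ABA'd head.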
       
   748 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
       
   749 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
       
   750 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
       
   751 int ObjectSynchronizer::gOmInUseCount = 0;
       
   752 static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
       
   753 static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
       
   754 static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
       
   755 #define CHAINMARKER ((oop)-1)
       
   756 
       
   757 // Constraining monitor pool growth via MonitorBound ...
       
   758 //
       
   759 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
       
   760 // the rate of scavenging is driven primarily by GC.  As such,  we can find
       
   761 // an inordinate number of monitors in circulation.
       
   762 // To avoid that scenario we can artificially induce a STW safepoint
       
   763 // if the pool appears to be growing past some reasonable bound.
       
   764 // Generally we favor time in space-time tradeoffs, but as there's no
       
   765 // natural back-pressure on the # of extant monitors we need to impose some
       
   766 // type of limit.  Beware that if MonitorBound is set to too low a value
       
   767 // we could just loop. In addition, if MonitorBound is set to a low value
       
   768 // we'll incur more safepoints, which are harmful to performance.
       
   769 // See also: GuaranteedSafepointInterval
       
   770 //
       
   771 // As noted elsewhere, the correct long-term solution is to deflate at
       
   772 // monitorexit-time, in which case the number of inflated objects is bounded
       
   773 // by the number of threads.  That policy obviates the need for scavenging at
       
   774 // STW safepoint time.   As an aside, scavenging can be time-consuming when the
       
   775 // # of extant monitors is large.   Unfortunately there's a day-1 assumption baked
       
   776 // into much HotSpot code that the object::monitor relationship, once established
       
   777 // or observed, will remain stable except over potential safepoints.
       
   778 //
       
   779 // We can use either a blocking synchronous VM operation or an async VM operation.
       
   780 // -- If we use a blocking VM operation :
       
   781 //    Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
       
   782 //    that lead to ::inflate() or ::omAlloc().
       
   783 //    Even though the safepoint will not directly induce GC, a GC might
       
   784 //    piggyback on the safepoint operation, so the caller should hold no naked oops.
       
   785 //    Furthermore, monitor::object relationships are NOT necessarily stable over this call
       
   786 //    unless the caller has made provisions to "pin" the object to the monitor, say
       
   787 //    by incrementing the monitor's _count field.
       
   788 // -- If we use a non-blocking asynchronous VM operation :
       
   789 //    the constraints above don't apply.  The safepoint will fire in the future
       
   790 //    at a more convenient time.  On the other hand the latency between posting and
       
   791 //    running the safepoint introduces or admits "slop" or laxity during which the
       
   792 //    monitor population can climb further above the threshold.  The monitor population,
       
   793 //    however, tends to converge asymptotically over time to a count that's slightly
       
   794 //    above the target value specified by MonitorBound.   That is, we avoid unbounded
       
   795 //    growth, albeit with some imprecision.
       
   796 //
       
   797 // The current implementation uses asynchronous VM operations.
       
   798 //
       
   799 // Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
       
   800 // immediately before trying to grow the global list via allocation.
       
   801 // If the predicate were true then we'd induce a synchronous safepoint, wait

   802 // for the safepoint to complete, and then retry the allocation from the global

   803 // free list.  This approach is simpler and more precise, admitting no "slop".
       
   804 // Unfortunately we can't safely safepoint in the midst of omAlloc(), so
       
   805 // instead we use asynchronous safepoints.
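       // Concretely, the deferred check -- as coded in omAlloc() below -- reduces to:
       //
       //   if (MonitorBound > 0 && (MonitorPopulation - MonitorFreeCount) > MonitorBound) {
       //     InduceScavenge (Self, "omAlloc") ;    // posts a VM_ForceAsyncSafepoint
       //   }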
       
   806 
       
   807 static void InduceScavenge (Thread * Self, const char * Whence) {
       
   808   // Induce STW safepoint to trim monitors
       
   809   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
       
   810   // More precisely, trigger an asynchronous STW safepoint as the number
       
   811   // of active monitors passes the specified threshold.
       
   812   // TODO: assert thread state is reasonable
       
   813 
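         // One-shot latch: the Atomic::xchg admits only the first thread past the
         // threshold; ForceMonitorScavenge is re-armed (cleared) once the induced
         // scavenge has run.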
       
   814   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
       
   815     if (Knob_Verbose) {
       
   816       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       
   817       ::fflush(stdout) ;
       
   818     }
       
   819     // Induce a 'null' safepoint to scavenge monitors
       
   820     // The VM_Operation instance must be heap allocated as the op will be enqueued and posted

   821     // to the VMThread and must have a lifespan longer than that of this activation record.
       
   822     // The VMThread will delete the op when completed.
       
   823     VMThread::execute (new VM_ForceAsyncSafepoint()) ;
       
   824 
       
   825     if (Knob_Verbose) {
       
   826       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       
   827       ::fflush(stdout) ;
       
   828     }
       
   829   }
       
   830 }
       
   831 /* Too slow for general assert or debug
       
   832 void ObjectSynchronizer::verifyInUse (Thread *Self) {
       
   833    ObjectMonitor* mid;
       
   834    int inusetally = 0;
       
   835    for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
       
   836      inusetally ++;
       
   837    }
       
   838    assert(inusetally == Self->omInUseCount, "inuse count off");
       
   839 
       
   840    int freetally = 0;
       
   841    for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
       
   842      freetally ++;
       
   843    }
       
   844    assert(freetally == Self->omFreeCount, "free count off");
       
   845 }
       
   846 */
       
   847 
       
   848 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
       
   849     // A large MAXPRIVATE value reduces both list lock contention
       
   850     // and list coherency traffic, but also tends to increase the
       
   851     // number of objectMonitors in circulation as well as the STW
       
   852     // scavenge costs.  As usual, we lean toward time in space-time
       
   853     // tradeoffs.
       
   854     const int MAXPRIVATE = 1024 ;
       
   855     for (;;) {
       
   856         ObjectMonitor * m ;
       
   857 
       
   858         // 1: try to allocate from the thread's local omFreeList.
       
   859         // Threads will attempt to allocate first from their local list, then
       
   860         // from the global list, and only after those attempts fail will the thread
       
   861         // attempt to instantiate new monitors.   Thread-local free lists take
       
   862         // heat off the ListLock and improve allocation latency, as well as reducing
       
   863         // coherency traffic on the shared global list.
       
   864         m = Self->omFreeList ;
       
   865         if (m != NULL) {
       
   866            Self->omFreeList = m->FreeNext ;
       
   867            Self->omFreeCount -- ;
       
   868            // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
       
   869            guarantee (m->object() == NULL, "invariant") ;
       
   870            if (MonitorInUseLists) {
       
   871              m->FreeNext = Self->omInUseList;
       
   872              Self->omInUseList = m;
       
   873              Self->omInUseCount ++;
       
   874              // verifyInUse(Self);
       
   875            } else {
       
   876              m->FreeNext = NULL;
       
   877            }
       
   878            return m ;
       
   879         }
       
   880 
       
   881         // 2: try to allocate from the global gFreeList
       
   882         // CONSIDER: use muxTry() instead of muxAcquire().
       
   883         // If the muxTry() fails then drop immediately into case 3.
       
   884         // If we're using thread-local free lists then try
       
   885         // to reprovision the caller's free list.
       
   886         if (gFreeList != NULL) {
       
   887             // Reprovision the thread's omFreeList.
       
   888             // Use bulk transfers to reduce the allocation rate and heat
       
   889             // on various locks.
       
   890             Thread::muxAcquire (&ListLock, "omAlloc") ;
       
   891             for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
       
   892                 MonitorFreeCount --;
       
   893                 ObjectMonitor * take = gFreeList ;
       
   894                 gFreeList = take->FreeNext ;
       
   895                 guarantee (take->object() == NULL, "invariant") ;
       
   896                 guarantee (!take->is_busy(), "invariant") ;
       
   897                 take->Recycle() ;
       
   898                 omRelease (Self, take, false) ;
       
   899             }
       
   900             Thread::muxRelease (&ListLock) ;
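                   // Grow the reprovision amount geometrically (about 1.5x per refill,
                   // capped at MAXPRIVATE) so busy threads take progressively larger
                   // batches and visit the contended ListLock less often.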
       
   901             Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
       
   902             if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
       
   903             TEVENT (omFirst - reprovision) ;
       
   904 
       
   905             const int mx = MonitorBound ;
       
   906             if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
       
   907               // We can't safely induce a STW safepoint from omAlloc() as our thread
       
   908               // state may not be appropriate for such activities and callers may hold
       
   909               // naked oops, so instead we defer the action.
       
   910               InduceScavenge (Self, "omAlloc") ;
       
   911             }
       
   912             continue;
       
   913         }
       
   914 
       
   915         // 3: allocate a block of new ObjectMonitors
       
   916         // Both the local and global free lists are empty -- resort to malloc().
       
   917         // In the current implementation objectMonitors are TSM - immortal.
       
   918         assert (_BLOCKSIZE > 1, "invariant") ;
       
   919         ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
       
   920 
       
   921         // NOTE: (almost) no way to recover if allocation failed.
       
   922         // We might be able to induce a STW safepoint and scavenge enough
       
   923         // objectMonitors to permit progress.
       
   924         if (temp == NULL) {
       
   925             vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
       
   926         }
       
   927 
       
   928         // Format the block.
       
   929         // Initialize the linked list: each monitor points to its successor,

   930         // forming the singly-linked free list.  The very first monitor

   931         // will point to the next block, which forms the block list.
       
   932         // The trick of using the 1st element in the block as gBlockList
       
   933         // linkage should be reconsidered.  A better implementation would
       
   934         // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
       
   935 
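               // A sketch of that alternative layout.  Block is a hypothetical type,
               // illustrative only -- the flexible payload would need careful sizing:
               //
               //   class Block {
               //    public:
               //     Block *       _next ;     // blocklist linkage, replacing temp[0]
               //     int           _n ;        // number of monitors in _body
               //     ObjectMonitor _body [1] ; // monitors proper; no element sacrificed
               //   } ;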
       
   936         for (int i = 1; i < _BLOCKSIZE ; i++) {
       
   937            temp[i].FreeNext = &temp[i+1];
       
   938         }
       
   939 
       
   940         // terminate the last monitor as the end of list
       
   941         temp[_BLOCKSIZE - 1].FreeNext = NULL ;
       
   942 
       
   943         // Element [0] is reserved for global list linkage
       
   944         temp[0].set_object(CHAINMARKER);
       
   945 
       
   946         // Consider carving out this thread's current request from the
       
   947         // block in hand.  This avoids some lock traffic and redundant
       
   948         // list activity.
       
   949 
       
   950         // Acquire the ListLock to manipulate BlockList and FreeList.
       
   951         // An Oyama-Taura-Yonezawa scheme might be more efficient.
       
   952         Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
       
   953         MonitorPopulation += _BLOCKSIZE-1;
       
   954         MonitorFreeCount += _BLOCKSIZE-1;
       
   955 
       
   956         // Add the new block to the list of extant blocks (gBlockList).
       
   957         // The very first objectMonitor in a block is reserved and dedicated.
       
   958         // It serves as blocklist "next" linkage.
       
   959         temp[0].FreeNext = gBlockList;
       
   960         gBlockList = temp;
       
   961 
       
   962         // Add the new string of objectMonitors to the global free list
       
   963         temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
       
   964         gFreeList = temp + 1;
       
   965         Thread::muxRelease (&ListLock) ;
       
   966         TEVENT (Allocate block of monitors) ;
       
   967     }
       
   968 }
       
   969 
       
   970 // Place "m" on the caller's private per-thread omFreeList.
       
   971 // In practice there's no need to clamp or limit the number of
       
   972 // monitors on a thread's omFreeList as the only time we'll call
       
   973 // omRelease is to return a monitor to the free list after a CAS
       
   974 // attempt failed.  This doesn't allow unbounded #s of monitors to
       
   975 // accumulate on a thread's free list.
       
   976 //
       
   977 // In the future the usage of omRelease() might change and monitors
       
   978 // could migrate between free lists.  In that case to avoid excessive
       
   979 //    accumulation we could limit omCount to (omProvision*2), otherwise return
       
   980 // the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
       
   981 // That is, *not* one-at-a-time.
       
   982 
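       // A hedged sketch of such chunked draining (the 2x threshold and the helper
       // shape are illustrative; Tally/Tail bookkeeping would mirror omFlush() below):
       //
       //   if (Self->omFreeCount > 2 * Self->omFreeProvision) {
       //     // detach ~omFreeProvision monitors from the head of omFreeList, locate
       //     // the tail of the chunk, then under ListLock splice the whole chunk
       //     // onto gFreeList and adjust MonitorFreeCount once.
       //   }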
       
   983 
       
   984 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
       
   985     guarantee (m->object() == NULL, "invariant") ;
       
   986 
       
   987     // Remove from omInUseList
       
   988     if (MonitorInUseLists && fromPerThreadAlloc) {
       
   989       ObjectMonitor* curmidinuse = NULL;
       
   990       for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
       
   991        if (m == mid) {
       
   992          // extract from per-thread in-use-list
       
   993          if (mid == Self->omInUseList) {
       
   994            Self->omInUseList = mid->FreeNext;
       
   995          } else if (curmidinuse != NULL) {
       
   996            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
       
   997          }
       
   998          Self->omInUseCount --;
       
   999          // verifyInUse(Self);
       
  1000          break;
       
  1001        } else {
       
  1002          curmidinuse = mid;
       
  1003          mid = mid->FreeNext;
       
  1004       }
       
  1005     }
       
  1006   }
       
  1007 
       
  1008   // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new
       
  1009   m->FreeNext = Self->omFreeList ;
       
  1010   Self->omFreeList = m ;
       
  1011   Self->omFreeCount ++ ;
       
  1012 }
       
  1013 
       
  1014 // Return the monitors of a moribund thread's local free list to
       
  1015 // the global free list.  Typically a thread calls omFlush() when
       
  1016 // it's dying.  We could also consider having the VM thread steal
       
  1017 // monitors from threads that have not run java code over a few
       
  1018 // consecutive STW safepoints.  Relatedly, we might decay
       
  1019 // omFreeProvision at STW safepoints.
       
  1020 //
       
  1021 // Also return the monitors of a moribund thread's omInUseList to
       
  1022 // a global gOmInUseList under the global list lock so these
       
  1023 // will continue to be scanned.
       
  1024 //
       
  1025 // We currently call omFlush() from the Thread:: dtor _after the thread
       
  1026 // has been excised from the thread list and is no longer a mutator.
       
  1027 // That means that omFlush() can run concurrently with a safepoint and
       
  1028 // the scavenge operator.  Calling omFlush() from JavaThread::exit() might
       
  1029 // be a better choice as we could safely reason that the JVM is

  1030 // not at a safepoint at the time of the call, and thus there could

  1031 // be no inopportune interleavings between omFlush() and the scavenge
       
  1032 // operator.
       
  1033 
       
  1034 void ObjectSynchronizer::omFlush (Thread * Self) {
       
  1035     ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
       
  1036     Self->omFreeList = NULL ;
       
  1037     ObjectMonitor * Tail = NULL ;
       
  1038     int Tally = 0;
       
  1039     if (List != NULL) {
       
  1040       ObjectMonitor * s ;
       
  1041       for (s = List ; s != NULL ; s = s->FreeNext) {
       
  1042           Tally ++ ;
       
  1043           Tail = s ;
       
  1044           guarantee (s->object() == NULL, "invariant") ;
       
  1045           guarantee (!s->is_busy(), "invariant") ;
       
  1046           s->set_owner (NULL) ;   // redundant but good hygiene
       
  1047           TEVENT (omFlush - Move one) ;
       
  1048       }
       
  1049       guarantee (Tail != NULL && List != NULL, "invariant") ;
       
  1050     }
       
  1051 
       
  1052     ObjectMonitor * InUseList = Self->omInUseList;
       
  1053     ObjectMonitor * InUseTail = NULL ;
       
  1054     int InUseTally = 0;
       
  1055     if (InUseList != NULL) {
       
  1056       Self->omInUseList = NULL;
       
  1057       ObjectMonitor *curom;
       
  1058       for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
       
  1059         InUseTail = curom;
       
  1060         InUseTally++;
       
  1061       }
       
  1062 // TODO debug
       
  1063       assert(Self->omInUseCount == InUseTally, "inuse count off");
       
  1064       Self->omInUseCount = 0;
       
  1065       guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
       
  1066     }
       
  1067 
       
  1068     Thread::muxAcquire (&ListLock, "omFlush") ;
       
  1069     if (Tail != NULL) {
       
  1070       Tail->FreeNext = gFreeList ;
       
  1071       gFreeList = List ;
       
  1072       MonitorFreeCount += Tally;
       
  1073     }
       
  1074 
       
  1075     if (InUseTail != NULL) {
       
  1076       InUseTail->FreeNext = gOmInUseList;
       
  1077       gOmInUseList = InUseList;
       
  1078       gOmInUseCount += InUseTally;
       
  1079     }
       
  1080 
       
  1081     Thread::muxRelease (&ListLock) ;
       
  1082     TEVENT (omFlush) ;
       
  1083 }
       
  1084 
       
  1085 
       
  1086 // Get the next block in the block list.
       
  1087 static inline ObjectMonitor* next(ObjectMonitor* block) {
       
  1088   assert(block->object() == CHAINMARKER, "must be a block header");
       
  1089   block = block->FreeNext ;
       
  1090   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
       
  1091   return block;
       
  1092 }
       
  1093 
       
  1094 // Fast path code shared by multiple functions
       
  1095 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
       
  1096   markOop mark = obj->mark();
       
  1097   if (mark->has_monitor()) {
       
  1098     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
       
  1099     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
       
  1100     return mark->monitor();
       
  1101   }
       
  1102   return ObjectSynchronizer::inflate(Thread::current(), obj);
       
  1103 }
       
  1104 
       
  1105 // Note that we could encounter some performance loss through false-sharing as
       
  1106 // multiple locks occupy the same $ line.  Padding might be appropriate.
       
  1107 
       
  1108 #define NINFLATIONLOCKS 256
       
  1109 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
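       // A possible padded variant (illustrative; 64 is an assumed $ line size):
       //
       //   struct PaddedLock { volatile intptr_t Lock ; char pad [64 - sizeof(intptr_t)] ; } ;
       //   static PaddedLock InflationLocks [NINFLATIONLOCKS] ;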
       
  1110 
   416 
  1111 static markOop ReadStableMark (oop obj) {
   417 static markOop ReadStableMark (oop obj) {
  1112   markOop mark = obj->mark() ;
   418   markOop mark = obj->mark() ;
  1113   if (!mark->is_being_inflated()) {
   419   if (!mark->is_being_inflated()) {
  1114     return mark ;       // normal fast-path return
   420     return mark ;       // normal fast-path return
  1174        SpinPause() ;       // SMP-polite spinning
   480        SpinPause() ;       // SMP-polite spinning
  1175     }
   481     }
  1176   }
   482   }
  1177 }
   483 }
  1178 
   484 
       
   485 // hashCode() generation :
       
   486 //
       
   487 // Possibilities:
       
   488 // * MD5Digest of {obj,stwRandom}
       
   489 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
       
   490 // * A DES- or AES-style SBox[] mechanism
       
   491 // * One of the Phi-based schemes, such as:
       
   492 //   2654435761 = 2^32 * Phi (golden ratio)
       
   493 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
       
   494 // * A variation of Marsaglia's shift-xor RNG scheme.
       
   495 // * (obj ^ stwRandom) is appealing, but can result
       
   496 //   in undesirable regularity in the hashCode values of adjacent objects
       
   497 //   (objects allocated back-to-back, in particular).  This could potentially
       
   498 //   result in hashtable collisions and reduced hashtable efficiency.
       
   499 //   There are simple ways to "diffuse" the middle address bits over the
       
   500 //   generated hashCode values:
       
   501 //
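       //   For instance (both taken from the variants implemented below):
       //     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;            // shift-xor spread
       //     value = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;  // Phi-based multiply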
       
   502 
       
   503 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
       
   504   intptr_t value = 0 ;
       
   505   if (hashCode == 0) {
       
   506      // This form uses an unguarded global Park-Miller RNG,
       
   507      // so it's possible for two threads to race and generate the same RNG value.

   508      // On MP systems we'll have lots of RW access to a global, so the
       
   509      // mechanism induces lots of coherency traffic.
       
   510      value = os::random() ;
       
   511   } else
       
   512   if (hashCode == 1) {
       
   513      // This variation has the property of being stable (idempotent)
       
   514      // between STW operations.  This can be useful in some of the 1-0
       
   515      // synchronization schemes.
       
   516      intptr_t addrBits = intptr_t(obj) >> 3 ;
       
   517      value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
       
   518   } else
       
   519   if (hashCode == 2) {
       
   520      value = 1 ;            // for sensitivity testing
       
   521   } else
       
   522   if (hashCode == 3) {
       
   523      value = ++GVars.hcSequence ;
       
   524   } else
       
   525   if (hashCode == 4) {
       
   526      value = intptr_t(obj) ;
       
   527   } else {
       
   528      // Marsaglia's xor-shift scheme with thread-specific state
       
   529      // This is probably the best overall implementation -- we'll
       
   530      // likely make this the default in future releases.
       
   531      unsigned t = Self->_hashStateX ;
       
   532      t ^= (t << 11) ;
       
   533      Self->_hashStateX = Self->_hashStateY ;
       
   534      Self->_hashStateY = Self->_hashStateZ ;
       
   535      Self->_hashStateZ = Self->_hashStateW ;
       
   536      unsigned v = Self->_hashStateW ;
       
   537      v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
       
   538      Self->_hashStateW = v ;
       
   539      value = v ;
       
   540   }
       
   541 
       
   542   value &= markOopDesc::hash_mask;
       
   543   if (value == 0) value = 0xBAD ;
       
   544   assert (value != markOopDesc::no_hash, "invariant") ;
       
   545   TEVENT (hashCode: GENERATE) ;
       
   546   return value;
       
   547 }
       
   548 //
       
   549 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
       
   550   if (UseBiasedLocking) {
       
   551     // NOTE: many places throughout the JVM do not expect a safepoint
       
   552     // to be taken here, in particular most operations on perm gen
       
   553     // objects. However, we only ever bias Java instances and all of
       
   554     // the call sites of identity_hash that might revoke biases have
       
   555     // been checked to make sure they can handle a safepoint. The
       
   556     // added check of the bias pattern is to avoid useless calls to
       
   557     // thread-local storage.
       
   558     if (obj->mark()->has_bias_pattern()) {
       
   559       // Box and unbox the raw reference just in case we cause a STW safepoint.
       
   560       Handle hobj (Self, obj) ;
       
   561       // Relaxing assertion for bug 6320749.
       
   562       assert (Universe::verify_in_progress() ||
       
   563               !SafepointSynchronize::is_at_safepoint(),
       
   564              "biases should not be seen by VM thread here");
       
   565       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
       
   566       obj = hobj() ;
       
   567       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
   568     }
       
   569   }
       
   570 
       
   571   // hashCode() is a heap mutator ...
       
   572   // Relaxing assertion for bug 6320749.
       
   573   assert (Universe::verify_in_progress() ||
       
   574           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
       
   575   assert (Universe::verify_in_progress() ||
       
   576           Self->is_Java_thread() , "invariant") ;
       
   577   assert (Universe::verify_in_progress() ||
       
   578          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
       
   579 
       
   580   ObjectMonitor* monitor = NULL;
       
   581   markOop temp, test;
       
   582   intptr_t hash;
       
   583   markOop mark = ReadStableMark (obj);
       
   584 
       
   585   // object should remain ineligible for biased locking
       
   586   assert (!mark->has_bias_pattern(), "invariant") ;
       
   587 
       
   588   if (mark->is_neutral()) {
       
   589     hash = mark->hash();              // this is a normal header
       
   590     if (hash) {                       // if it has hash, just return it
       
   591       return hash;
       
   592     }
       
   593     hash = get_next_hash(Self, obj);  // allocate a new hash code
       
   594     temp = mark->copy_set_hash(hash); // merge the hash code into header
       
   595     // use (machine word version) atomic operation to install the hash
       
   596     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
       
   597     if (test == mark) {
       
   598       return hash;
       
   599     }
       
   600     // If the atomic operation failed, we must inflate the header

   601     // into a heavyweight monitor. We could add more code here

   602     // for the fast path, but it is not worth the complexity.
       
   603   } else if (mark->has_monitor()) {
       
   604     monitor = mark->monitor();
       
   605     temp = monitor->header();
       
   606     assert (temp->is_neutral(), "invariant") ;
       
   607     hash = temp->hash();
       
   608     if (hash) {
       
   609       return hash;
       
   610     }
       
   611     // Skip to the following code to reduce code size
       
   612   } else if (Self->is_lock_owned((address)mark->locker())) {
       
   613     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
       
   614     assert (temp->is_neutral(), "invariant") ;
       
   615     hash = temp->hash();              // by current thread, check if the displaced
       
   616     if (hash) {                       // header contains hash code
       
   617       return hash;
       
   618     }
       
   619     // WARNING:
       
   620     //   The displaced header is strictly immutable.
       
   621     // It can NOT be changed in ANY case. So we have

   622     // to inflate the header into a heavyweight monitor

   623     // even if the current thread owns the lock. The reason

   624     // is that the BasicLock (stack slot) will be asynchronously

   625     // read by other threads during the inflate() function.

   626     // Any change to the stack may not propagate to other threads
       
   627     // correctly.
       
   628   }
       
   629 
       
   630   // Inflate the monitor to set hash code
       
   631   monitor = ObjectSynchronizer::inflate(Self, obj);
       
   632   // Load displaced header and check it has hash code
       
   633   mark = monitor->header();
       
   634   assert (mark->is_neutral(), "invariant") ;
       
   635   hash = mark->hash();
       
   636   if (hash == 0) {
       
   637     hash = get_next_hash(Self, obj);
       
   638     temp = mark->copy_set_hash(hash); // merge hash code into header
       
   639     assert (temp->is_neutral(), "invariant") ;
       
   640     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
       
   641     if (test != mark) {
       
   642       // The only update to the header in the monitor (outside GC)
       
   643       // is to install the hash code. If someone adds a new usage of the

   644       // displaced header, please update this code.
       
   645       hash = test->hash();
       
   646       assert (test->is_neutral(), "invariant") ;
       
   647       assert (hash != 0, "Trivial unexpected object/monitor header usage.");
       
   648     }
       
   649   }
       
   650   // We finally get the hash
       
   651   return hash;
       
   652 }
       
   653 
       
   654 // Deprecated -- use FastHashCode() instead.
       
   655 
       
   656 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
       
   657   return FastHashCode (Thread::current(), obj()) ;
       
   658 }
       
   659 
       
   660 
       
   661 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
       
   662                                                    Handle h_obj) {
       
   663   if (UseBiasedLocking) {
       
   664     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
       
   665     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
   666   }
       
   667 
       
   668   assert(thread == JavaThread::current(), "Can only be called on current thread");
       
   669   oop obj = h_obj();
       
   670 
       
   671   markOop mark = ReadStableMark (obj) ;
       
   672 
       
   673   // Uncontended case, header points to stack
       
   674   if (mark->has_locker()) {
       
   675     return thread->is_lock_owned((address)mark->locker());
       
   676   }
       
   677   // Contended case, header points to ObjectMonitor (tagged pointer)
       
   678   if (mark->has_monitor()) {
       
   679     ObjectMonitor* monitor = mark->monitor();
       
   680     return monitor->is_entered(thread) != 0 ;
       
   681   }
       
   682   // Unlocked case, header in place
       
   683   assert(mark->is_neutral(), "sanity check");
       
   684   return false;
       
   685 }
       
   686 
       
   687 // Be aware that this method could revoke the bias of the lock object.

   688 // This method queries the ownership of the lock handle specified by 'h_obj'.

   689 // If the current thread owns the lock, it returns owner_self. If no

   690 // thread owns the lock, it returns owner_none. Otherwise, it returns

   691 // owner_other.
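       // Illustrative use by a hypothetical caller:
       //
       //   ObjectSynchronizer::LockOwnership who =
       //       ObjectSynchronizer::query_lock_ownership (JavaThread::current(), h_obj) ;
       //   if (who == ObjectSynchronizer::owner_self) { /* we hold the lock */ }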
       
   692 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
       
   693 (JavaThread *self, Handle h_obj) {
       
   694   // The caller must beware this method can revoke bias, and
       
   695   // revocation can result in a safepoint.
       
   696   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
       
   697   assert (self->thread_state() != _thread_blocked , "invariant") ;
       
   698 
       
   699   // Possible mark states: neutral, biased, stack-locked, inflated
       
   700 
       
   701   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
       
   702     // CASE: biased
       
   703     BiasedLocking::revoke_and_rebias(h_obj, false, self);
       
   704     assert(!h_obj->mark()->has_bias_pattern(),
       
   705            "biases should be revoked by now");
       
   706   }
       
   707 
       
   708   assert(self == JavaThread::current(), "Can only be called on current thread");
       
   709   oop obj = h_obj();
       
   710   markOop mark = ReadStableMark (obj) ;
       
   711 
       
   712   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
       
   713   if (mark->has_locker()) {
       
   714     return self->is_lock_owned((address)mark->locker()) ?
       
   715       owner_self : owner_other;
       
   716   }
       
   717 
       
   718   // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
       
   719   // The Object:ObjectMonitor relationship is stable as long as we're
       
   720   // not at a safepoint.
       
   721   if (mark->has_monitor()) {
       
   722     void * owner = mark->monitor()->_owner ;
       
   723     if (owner == NULL) return owner_none ;
       
   724     return (owner == self ||
       
   725             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
       
   726   }
       
   727 
       
   728   // CASE: neutral
       
   729   assert(mark->is_neutral(), "sanity check");
       
   730   return owner_none ;           // it's unlocked
       
   731 }
       
   732 
       
   733 // FIXME: jvmti should call this
       
   734 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
       
   735   if (UseBiasedLocking) {
       
   736     if (SafepointSynchronize::is_at_safepoint()) {
       
   737       BiasedLocking::revoke_at_safepoint(h_obj);
       
   738     } else {
       
   739       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
       
   740     }
       
   741     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
   742   }
       
   743 
       
   744   oop obj = h_obj();
       
   745   address owner = NULL;
       
   746 
       
   747   markOop mark = ReadStableMark (obj) ;
       
   748 
       
   749   // Uncontended case, header points to stack
       
   750   if (mark->has_locker()) {
       
   751     owner = (address) mark->locker();
       
   752   }
       
   753 
       
   754   // Contended case, header points to ObjectMonitor (tagged pointer)
       
   755   if (mark->has_monitor()) {
       
   756     ObjectMonitor* monitor = mark->monitor();
       
   757     assert(monitor != NULL, "monitor should be non-null");
       
   758     owner = (address) monitor->owner();
       
   759   }
       
   760 
       
   761   if (owner != NULL) {
       
   762     return Threads::owning_thread_from_monitor_owner(owner, doLock);
       
   763   }
       
   764 
       
   765   // Unlocked case, header in place
       
   766   // We cannot assert mark->is_neutral() here since this object may

   767   // have been locked by another thread by the time we reach this point.
       
   768   // assert(mark->is_neutral(), "sanity check");
       
   769 
       
   770   return NULL;
       
   771 }
       
   772 // Visitors ...
       
   773 
       
   774 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
       
   775   ObjectMonitor* block = gBlockList;
       
   776   ObjectMonitor* mid;
       
   777   while (block) {
       
   778     assert(block->object() == CHAINMARKER, "must be a block header");
       
   779     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
       
   780       mid = block + i;
       
   781       oop object = (oop) mid->object();
       
   782       if (object != NULL) {
       
   783         closure->do_monitor(mid);
       
   784       }
       
   785     }
       
   786     block = (ObjectMonitor*) block->FreeNext;
       
   787   }
       
   788 }
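       // Illustrative visitor -- a hypothetical tally of in-use monitors
       // (MonitorClosure itself is declared elsewhere):
       //
       //   class CountingClosure : public MonitorClosure {
       //    public:
       //     int _tally ;
       //     CountingClosure() : _tally(0) {}
       //     void do_monitor (ObjectMonitor * mid) { _tally ++ ; }
       //   } ;
       //
       //   CountingClosure cc ;
       //   ObjectSynchronizer::monitors_iterate (&cc) ;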
       
   789 
       
   790 // Get the next block in the block list.
       
   791 static inline ObjectMonitor* next(ObjectMonitor* block) {
       
   792   assert(block->object() == CHAINMARKER, "must be a block header");
       
   793   block = block->FreeNext ;
       
   794   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
       
   795   return block;
       
   796 }
       
   797 
       
   798 
       
   799 void ObjectSynchronizer::oops_do(OopClosure* f) {
       
   800   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
       
   801   for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
       
   802     assert(block->object() == CHAINMARKER, "must be a block header");
       
   803     for (int i = 1; i < _BLOCKSIZE; i++) {
       
   804       ObjectMonitor* mid = &block[i];
       
   805       if (mid->object() != NULL) {
       
   806         f->do_oop((oop*)mid->object_addr());
       
   807       }
       
   808     }
       
   809   }
       
   810 }
       
   811 
       
   812 
       
   813 // -----------------------------------------------------------------------------
       
   814 // ObjectMonitor Lifecycle
       
   815 // -----------------------
       
   816 // Inflation unlinks monitors from the global gFreeList and
       
   817 // associates them with objects.  Deflation -- which occurs at
       
   818 // STW-time -- disassociates idle monitors from objects.  Such
       
   819 // scavenged monitors are returned to the gFreeList.
       
   820 //
       
   821 // The global list is protected by ListLock.  All the critical sections
       
   822 // are short and operate in constant-time.
       
   823 //
       
   824 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
       
   825 //
       
   826 // Lifecycle:
       
   827 // --   unassigned and on the global free list
       
   828 // --   unassigned and on a thread's private omFreeList
       
   829 // --   assigned to an object.  The object is inflated and the mark refers
       
   830 //      to the objectmonitor.
       
   831 //
       
   832 
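       // Pictorially:
       //
       //   gFreeList --omAlloc()--> omFreeList --inflate()--> object's mark
       //   object's mark --deflate_idle_monitors() (STW)--> gFreeList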
       
   833 
       
   834 // Constraining monitor pool growth via MonitorBound ...
       
   835 //
       
   836 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but

   837 // the rate of scavenging is driven primarily by GC.  As such, we can find
       
   838 // an inordinate number of monitors in circulation.
       
   839 // To avoid that scenario we can artificially induce a STW safepoint
       
   840 // if the pool appears to be growing past some reasonable bound.
       
   841 // Generally we favor time in space-time tradeoffs, but as there's no
       
   842 // natural back-pressure on the # of extant monitors we need to impose some
       
   843 // type of limit.  Beware that if MonitorBound is set to too low a value
       
   844 // we could just loop. In addition, if MonitorBound is set to a low value
       
   845 // we'll incur more safepoints, which are harmful to performance.
       
   846 // See also: GuaranteedSafepointInterval
       
   847 //
       
   848 // The current implementation uses asynchronous VM operations.
       
   849 //
       
   850 
       
   851 static void InduceScavenge (Thread * Self, const char * Whence) {
       
   852   // Induce STW safepoint to trim monitors
       
   853   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
       
   854   // More precisely, trigger an asynchronous STW safepoint as the number
       
   855   // of active monitors passes the specified threshold.
       
   856   // TODO: assert thread state is reasonable
       
   857 
       
   858   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
       
   859     if (ObjectMonitor::Knob_Verbose) {
       
   860       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       
   861       ::fflush(stdout) ;
       
   862     }
       
   863     // Induce a 'null' safepoint to scavenge monitors
       
   864     // The VM_Operation instance must be heap allocated as the op will be enqueued and posted

   865     // to the VMThread and must have a lifespan longer than that of this activation record.
       
   866     // The VMThread will delete the op when completed.
       
   867     VMThread::execute (new VM_ForceAsyncSafepoint()) ;
       
   868 
       
   869     if (ObjectMonitor::Knob_Verbose) {
       
   870       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       
   871       ::fflush(stdout) ;
       
   872     }
       
   873   }
       
   874 }
       
   875 /* Too slow for general assert or debug
       
   876 void ObjectSynchronizer::verifyInUse (Thread *Self) {
       
   877    ObjectMonitor* mid;
       
   878    int inusetally = 0;
       
   879    for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
       
   880      inusetally ++;
       
   881    }
       
   882    assert(inusetally == Self->omInUseCount, "inuse count off");
       
   883 
       
   884    int freetally = 0;
       
   885    for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
       
   886      freetally ++;
       
   887    }
       
   888    assert(freetally == Self->omFreeCount, "free count off");
       
   889 }
       
   890 */
       
   891 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
       
   892     // A large MAXPRIVATE value reduces both list lock contention
       
   893     // and list coherency traffic, but also tends to increase the
       
   894     // number of objectMonitors in circulation as well as the STW
       
   895     // scavenge costs.  As usual, we lean toward time in space-time
       
   896     // tradeoffs.
       
   897     const int MAXPRIVATE = 1024 ;
       
   898     for (;;) {
       
   899         ObjectMonitor * m ;
       
   900 
       
   901         // 1: try to allocate from the thread's local omFreeList.
       
   902         // Threads will attempt to allocate first from their local list, then
       
   903         // from the global list, and only after those attempts fail will the thread
       
   904         // attempt to instantiate new monitors.   Thread-local free lists take
       
   905         // heat off the ListLock and improve allocation latency, as well as reducing
       
   906         // coherency traffic on the shared global list.
       
   907         m = Self->omFreeList ;
       
   908         if (m != NULL) {
       
   909            Self->omFreeList = m->FreeNext ;
       
   910            Self->omFreeCount -- ;
       
   911            // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
       
   912            guarantee (m->object() == NULL, "invariant") ;
       
   913            if (MonitorInUseLists) {
       
   914              m->FreeNext = Self->omInUseList;
       
   915              Self->omInUseList = m;
       
   916              Self->omInUseCount ++;
       
   917              // verifyInUse(Self);
       
   918            } else {
       
   919              m->FreeNext = NULL;
       
   920            }
       
   921            return m ;
       
   922         }
       
   923 
       
   924         // 2: try to allocate from the global gFreeList
       
   925         // CONSIDER: use muxTry() instead of muxAcquire().
       
   926         // If the muxTry() fails then drop immediately into case 3.
       
   927         // If we're using thread-local free lists then try
       
   928         // to reprovision the caller's free list.
       
   929         if (gFreeList != NULL) {
       
   930             // Reprovision the thread's omFreeList.
       
   931             // Use bulk transfers to reduce the allocation rate and heat
       
   932             // on various locks.
       
   933             Thread::muxAcquire (&ListLock, "omAlloc") ;
       
   934             for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
       
   935                 MonitorFreeCount --;
       
   936                 ObjectMonitor * take = gFreeList ;
       
   937                 gFreeList = take->FreeNext ;
       
   938                 guarantee (take->object() == NULL, "invariant") ;
       
   939                 guarantee (!take->is_busy(), "invariant") ;
       
   940                 take->Recycle() ;
       
   941                 omRelease (Self, take, false) ;
       
   942             }
       
   943             Thread::muxRelease (&ListLock) ;
       
   944             Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
       
   945             if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
       
   946             TEVENT (omFirst - reprovision) ;
       
   947 
       
   948             const int mx = MonitorBound ;
       
   949             if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
       
   950               // We can't safely induce a STW safepoint from omAlloc() as our thread
       
   951               // state may not be appropriate for such activities and callers may hold
       
   952               // naked oops, so instead we defer the action.
       
   953               InduceScavenge (Self, "omAlloc") ;
       
   954             }
       
   955             continue;
       
   956         }
       
   957 
       
   958         // 3: allocate a block of new ObjectMonitors
       
   959         // Both the local and global free lists are empty -- resort to malloc().
       
   960         // In the current implementation objectMonitors are TSM - immortal.
       
   961         assert (_BLOCKSIZE > 1, "invariant") ;
       
   962         ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
       
   963 
       
   964         // NOTE: (almost) no way to recover if allocation failed.
       
   965         // We might be able to induce a STW safepoint and scavenge enough
       
   966         // objectMonitors to permit progress.
       
   967         if (temp == NULL) {
       
   968             vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
       
   969         }
       
   970 
       
   971         // Format the block.
       
   972         // Initialize the linked list: each monitor points to its successor,

   973         // forming the singly-linked free list.  The very first monitor

   974         // will point to the next block, which forms the block list.
       
   975         // The trick of using the 1st element in the block as gBlockList
       
   976         // linkage should be reconsidered.  A better implementation would
       
   977         // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
       
   978 
       
   979         for (int i = 1; i < _BLOCKSIZE ; i++) {
       
   980            temp[i].FreeNext = &temp[i+1];
       
   981         }
       
   982 
       
   983         // terminate the last monitor as the end of list
       
   984         temp[_BLOCKSIZE - 1].FreeNext = NULL ;
       
   985 
       
   986         // Element [0] is reserved for global list linkage
       
   987         temp[0].set_object(CHAINMARKER);
       
   988 
       
   989         // Consider carving out this thread's current request from the
       
   990         // block in hand.  This avoids some lock traffic and redundant
       
   991         // list activity.
       
   992 
       
   993         // Acquire the ListLock to manipulate BlockList and FreeList.
       
   994         // An Oyama-Taura-Yonezawa scheme might be more efficient.
       
   995         Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
       
   996         MonitorPopulation += _BLOCKSIZE-1;
       
   997         MonitorFreeCount += _BLOCKSIZE-1;
       
   998 
       
   999         // Add the new block to the list of extant blocks (gBlockList).
       
  1000         // The very first objectMonitor in a block is reserved and dedicated.
       
  1001         // It serves as blocklist "next" linkage.
       
  1002         temp[0].FreeNext = gBlockList;
       
  1003         gBlockList = temp;
       
  1004 
       
  1005         // Add the new string of objectMonitors to the global free list
       
  1006         temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
       
  1007         gFreeList = temp + 1;
       
  1008         Thread::muxRelease (&ListLock) ;
       
  1009         TEVENT (Allocate block of monitors) ;
       
  1010     }
       
  1011 }
       
  1012 
       
  1013 // Place "m" on the caller's private per-thread omFreeList.
       
  1014 // In practice there's no need to clamp or limit the number of
       
  1015 // monitors on a thread's omFreeList as the only time we'll call
       
  1016 // omRelease is to return a monitor to the free list after a CAS
       
  1017 // attempt failed.  This doesn't allow unbounded #s of monitors to
       
  1018 // accumulate on a thread's free list.
       
  1019 //
       
  1020 
       
  1021 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
       
  1022     guarantee (m->object() == NULL, "invariant") ;
       
  1023 
       
  1024     // Remove from omInUseList
       
  1025     if (MonitorInUseLists && fromPerThreadAlloc) {
       
  1026       ObjectMonitor* curmidinuse = NULL;
       
  1027       for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
       
  1028        if (m == mid) {
       
  1029          // extract from per-thread in-use-list
       
  1030          if (mid == Self->omInUseList) {
       
  1031            Self->omInUseList = mid->FreeNext;
       
  1032          } else if (curmidinuse != NULL) {
       
  1033            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
       
  1034          }
       
  1035          Self->omInUseCount --;
       
  1036          // verifyInUse(Self);
       
  1037          break;
       
  1038        } else {
       
  1039          curmidinuse = mid;
       
  1040          mid = mid->FreeNext;
       
  1041       }
       
  1042     }
       
  1043   }
       
  1044 
       
  1045   // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new
       
  1046   m->FreeNext = Self->omFreeList ;
       
  1047   Self->omFreeList = m ;
       
  1048   Self->omFreeCount ++ ;
       
  1049 }
       
  1050 
       
  1051 // Return the monitors of a moribund thread's local free list to
       
  1052 // the global free list.  Typically a thread calls omFlush() when
       
  1053 // it's dying.  We could also consider having the VM thread steal
       
  1054 // monitors from threads that have not run java code over a few
       
  1055 // consecutive STW safepoints.  Relatedly, we might decay
       
  1056 // omFreeProvision at STW safepoints.
       
  1057 //
       
  1058 // Also return the monitors of a moribund thread's omInUseList to
       
  1059 // a global gOmInUseList under the global list lock so these
       
  1060 // will continue to be scanned.
       
  1061 //
       
  1062 // We currently call omFlush() from the Thread:: dtor _after the thread
       
  1063 // has been excised from the thread list and is no longer a mutator.
       
  1064 // That means that omFlush() can run concurrently with a safepoint and
       
  1065 // the scavenge operator.  Calling omFlush() from JavaThread::exit() might
       
  1066 // be a better choice as we could safely reason that the JVM is

  1067 // not at a safepoint at the time of the call, and thus there could

  1068 // be no inopportune interleavings between omFlush() and the scavenge
       
  1069 // operator.
       
  1070 
       
  1071 void ObjectSynchronizer::omFlush (Thread * Self) {
       
  1072     ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
       
  1073     Self->omFreeList = NULL ;
       
  1074     ObjectMonitor * Tail = NULL ;
       
  1075     int Tally = 0;
       
  1076     if (List != NULL) {
       
  1077       ObjectMonitor * s ;
       
  1078       for (s = List ; s != NULL ; s = s->FreeNext) {
       
  1079           Tally ++ ;
       
  1080           Tail = s ;
       
  1081           guarantee (s->object() == NULL, "invariant") ;
       
  1082           guarantee (!s->is_busy(), "invariant") ;
       
  1083           s->set_owner (NULL) ;   // redundant but good hygiene
       
  1084           TEVENT (omFlush - Move one) ;
       
  1085       }
       
  1086       guarantee (Tail != NULL && List != NULL, "invariant") ;
       
  1087     }
       
  1088 
       
  1089     ObjectMonitor * InUseList = Self->omInUseList;
       
  1090     ObjectMonitor * InUseTail = NULL ;
       
  1091     int InUseTally = 0;
       
  1092     if (InUseList != NULL) {
       
  1093       Self->omInUseList = NULL;
       
  1094       ObjectMonitor *curom;
       
  1095       for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
       
  1096         InUseTail = curom;
       
  1097         InUseTally++;
       
  1098       }
       
  1099 // TODO debug
       
  1100       assert(Self->omInUseCount == InUseTally, "inuse count off");
       
  1101       Self->omInUseCount = 0;
       
  1102       guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
       
  1103     }
       
  1104 
       
  1105     Thread::muxAcquire (&ListLock, "omFlush") ;
       
  1106     if (Tail != NULL) {
       
  1107       Tail->FreeNext = gFreeList ;
       
  1108       gFreeList = List ;
       
  1109       MonitorFreeCount += Tally;
       
  1110     }
       
  1111 
       
  1112     if (InUseTail != NULL) {
       
  1113       InUseTail->FreeNext = gOmInUseList;
       
  1114       gOmInUseList = InUseList;
       
  1115       gOmInUseCount += InUseTally;
       
  1116     }
       
  1117 
       
  1118     Thread::muxRelease (&ListLock) ;
       
  1119     TEVENT (omFlush) ;
       
  1120 }
       
  1121 
       
  1122 // Fast path code shared by multiple functions
       
  1123 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
       
  1124   markOop mark = obj->mark();
       
  1125   if (mark->has_monitor()) {
       
  1126     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
       
  1127     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
       
  1128     return mark->monitor();
       
  1129   }
       
  1130   return ObjectSynchronizer::inflate(Thread::current(), obj);
       
  1131 }
       
  1132 
       
  1133 
       
  1134 // Note that we could encounter some performance loss through false-sharing as
       
  1135 // multiple locks occupy the same $ line.  Padding might be appropriate.
       
  1136 
       
  1137 
  1179 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  1138 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  1180   // Inflate mutates the heap ...
  1139   // Inflate mutates the heap ...
  1181   // Relaxing assertion for bug 6320749.
  1140   // Relaxing assertion for bug 6320749.
  1182   assert (Universe::verify_in_progress() ||
  1141   assert (Universe::verify_in_progress() ||
  1183           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  1142           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  1240           // in which INFLATING appears in the mark.
  1199           // in which INFLATING appears in the mark.
  1241           m->Recycle();
  1200           m->Recycle();
  1242           m->_Responsible  = NULL ;
  1201           m->_Responsible  = NULL ;
  1243           m->OwnerIsThread = 0 ;
  1202           m->OwnerIsThread = 0 ;
  1244           m->_recursions   = 0 ;
  1203           m->_recursions   = 0 ;
  1245           m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class
  1204           m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class
  1246 
  1205 
  1247           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
  1206           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
  1248           if (cmp != mark) {
  1207           if (cmp != mark) {
  1249              omRelease (Self, m, true) ;
  1208              omRelease (Self, m, true) ;
  1250              continue ;       // Interference -- just retry
  1209              continue ;       // Interference -- just retry
  1300           guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
  1259           guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
  1301           object->release_set_mark(markOopDesc::encode(m));
  1260           object->release_set_mark(markOopDesc::encode(m));
  1302 
  1261 
  1303           // Hopefully the performance counters are allocated on distinct cache lines
  1262           // Hopefully the performance counters are allocated on distinct cache lines
  1304           // to avoid false sharing on MP systems ...
  1263           // to avoid false sharing on MP systems ...
  1305           if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
  1264           if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
  1306           TEVENT(Inflate: overwrite stacklock) ;
  1265           TEVENT(Inflate: overwrite stacklock) ;
  1307           if (TraceMonitorInflation) {
  1266           if (TraceMonitorInflation) {
  1308             if (object->is_instance()) {
  1267             if (object->is_instance()) {
  1309               ResourceMark rm;
  1268               ResourceMark rm;
  1310               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
  1269               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
  1333       m->set_owner(NULL);
  1292       m->set_owner(NULL);
  1334       m->set_object(object);
  1293       m->set_object(object);
  1335       m->OwnerIsThread = 1 ;
  1294       m->OwnerIsThread = 1 ;
  1336       m->_recursions   = 0 ;
  1295       m->_recursions   = 0 ;
  1337       m->_Responsible  = NULL ;
  1296       m->_Responsible  = NULL ;
  1338       m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
  1297       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class
  1339 
  1298 
  1340       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
  1299       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
  1341           m->set_object (NULL) ;
  1300           m->set_object (NULL) ;
  1342           m->set_owner  (NULL) ;
  1301           m->set_owner  (NULL) ;
  1343           m->OwnerIsThread = 0 ;
  1302           m->OwnerIsThread = 0 ;
  1350           // live-lock -- "Inflated" is an absorbing state.
  1309           // live-lock -- "Inflated" is an absorbing state.
  1351       }
  1310       }
  1352 
  1311 
  1353       // Hopefully the performance counters are allocated on distinct
  1312       // Hopefully the performance counters are allocated on distinct
  1354       // cache lines to avoid false sharing on MP systems ...
  1313       // cache lines to avoid false sharing on MP systems ...
  1355       if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
  1314       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
  1356       TEVENT(Inflate: overwrite neutral) ;
  1315       TEVENT(Inflate: overwrite neutral) ;
  1357       if (TraceMonitorInflation) {
  1316       if (TraceMonitorInflation) {
  1358         if (object->is_instance()) {
  1317         if (object->is_instance()) {
  1359           ResourceMark rm;
  1318           ResourceMark rm;
  1360           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
  1319           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
  1364       }
  1323       }
  1365       return m ;
  1324       return m ;
  1366   }
  1325   }
  1367 }
  1326 }
  1368 
  1327 
  1369 
  1328 // Note that we could encounter some performance loss through false-sharing as
  1370 // This is the fast monitor enter. The interpreter and compiler use
  1329 // multiple locks occupy the same $ line.  Padding might be appropriate.
  1371 // some assembly copies of this code. Make sure to update that code
  1330 
  1372 // if the following function is changed. The implementation is
       
  1373 // extremely sensitive to race conditions. Be careful.
       
  1374 
       
  1375 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
       
  1376  if (UseBiasedLocking) {
       
  1377     if (!SafepointSynchronize::is_at_safepoint()) {
       
  1378       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
       
  1379       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
       
  1380         return;
       
  1381       }
       
  1382     } else {
       
  1383       assert(!attempt_rebias, "can not rebias toward VM thread");
       
  1384       BiasedLocking::revoke_at_safepoint(obj);
       
  1385     }
       
  1386     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1387  }
       
  1388 
       
  1389  slow_enter (obj, lock, THREAD) ;
       
  1390 }
       
  1391 
       
  1392 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
       
  1393   assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
       
  1394   // If the displaced header is NULL, the previous enter was a recursive enter and this exit is a no-op.
       
  1395   markOop dhw = lock->displaced_header();
       
  1396   markOop mark ;
       
  1397   if (dhw == NULL) {
       
  1398      // Recursive stack-lock.
       
  1399      // Diagnostics -- Could be: stack-locked, inflating, inflated.
       
  1400      mark = object->mark() ;
       
  1401      assert (!mark->is_neutral(), "invariant") ;
       
  1402      if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
       
  1403         assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
       
  1404      }
       
  1405      if (mark->has_monitor()) {
       
  1406         ObjectMonitor * m = mark->monitor() ;
       
  1407         assert(((oop)(m->object()))->mark() == mark, "invariant") ;
       
  1408         assert(m->is_entered(THREAD), "invariant") ;
       
  1409      }
       
  1410      return ;
       
  1411   }
       
  1412 
       
  1413   mark = object->mark() ;
       
  1414 
       
  1415   // If the object is stack-locked by the current thread, try to
       
  1416   // swing the displaced header from the box back to the mark.
       
  1417   if (mark == (markOop) lock) {
       
  1418      assert (dhw->is_neutral(), "invariant") ;
       
  1419      if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
       
  1420         TEVENT (fast_exit: release stacklock) ;
       
  1421         return;
       
  1422      }
       
  1423   }
       
  1424 
       
  1425   ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
       
  1426 }
       
  1427 
       
  1428 // This routine is used to handle interpreter/compiler slow case
       
  1429 // We don't need to use the fast path here, because it must have
       
  1430 // already failed in the interpreter/compiler code.
       
  1431 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
       
  1432   markOop mark = obj->mark();
       
  1433   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
       
  1434 
       
  1435   if (mark->is_neutral()) {
       
  1436     // Anticipate successful CAS -- the ST of the displaced mark must
       
  1437     // be visible <= the ST performed by the CAS.
       
  1438     lock->set_displaced_header(mark);
       
  1439     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
       
  1440       TEVENT (slow_enter: release stacklock) ;
       
  1441       return ;
       
  1442     }
       
  1443     // Fall through to inflate() ...
       
  1444   } else
       
  1445   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
       
  1446     assert(lock != mark->locker(), "must not re-lock the same lock");
       
  1447     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
       
  1448     lock->set_displaced_header(NULL);
       
  1449     return;
       
  1450   }
       
  1451 
       
  1452 #if 0
       
  1453   // The following optimization isn't particularly useful.
       
  1454   if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
       
  1455     lock->set_displaced_header (NULL) ;
       
  1456     return ;
       
  1457   }
       
  1458 #endif
       
  1459 
       
  1460   // The object header will never be displaced to this lock,
       
  1461   // so it does not matter what the value is, except that it
       
  1462   // must be non-zero to avoid looking like a re-entrant lock,
       
  1463   // and must not look locked either.
       
  1464   lock->set_displaced_header(markOopDesc::unused_mark());
       
  1465   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
       
  1466 }
       
  1467 
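// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The stack-lock enter/exit
// protocol above, modeled with std::atomic on a whole-word header.  Real mark
// words multiplex tag bits, hash and age; this model ignores all of that.

#include <atomic>
#include <cstdint>

struct BasicLockSketch { uintptr_t displaced; };

// Enter: publish the displaced header in the box, then CAS the box address
// into the header.  The ST of the displaced mark must be visible <= the CAS.
inline bool sketch_stack_lock(std::atomic<uintptr_t>& header,
                              BasicLockSketch* box) {
  uintptr_t mark = header.load(std::memory_order_relaxed);
  box->displaced = mark;
  return header.compare_exchange_strong(mark,
                                        reinterpret_cast<uintptr_t>(box),
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed);
}

// Exit: swing the displaced header from the box back into the mark word.
// On CAS failure the lock was inflated in the interim and the slow path runs.
inline bool sketch_stack_unlock(std::atomic<uintptr_t>& header,
                                BasicLockSketch* box) {
  uintptr_t expected = reinterpret_cast<uintptr_t>(box);
  return header.compare_exchange_strong(expected, box->displaced,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
}
// -----------------------------------------------------------------------------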
       
  1468 // This routine is used to handle interpreter/compiler slow case
       
  1469 // We don't need to use the fast path here, because it must have
       
  1470 // failed in the interpreter/compiler code. Simply using the heavy-
       
  1471 // weight monitor should be OK, unless someone finds otherwise.
       
  1472 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
       
  1473   fast_exit (object, lock, THREAD) ;
       
  1474 }
       
  1475 
       
  1476 // NOTE: must use heavy weight monitor to handle jni monitor enter
       
  1477 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
       
  1478   // the current locking is from JNI instead of Java code
       
  1479   TEVENT (jni_enter) ;
       
  1480   if (UseBiasedLocking) {
       
  1481     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1482     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1483   }
       
  1484   THREAD->set_current_pending_monitor_is_from_java(false);
       
  1485   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
       
  1486   THREAD->set_current_pending_monitor_is_from_java(true);
       
  1487 }
       
  1488 
       
  1489 // NOTE: must use heavy weight monitor to handle jni monitor enter
       
  1490 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
       
  1491   if (UseBiasedLocking) {
       
  1492     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1493     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1494   }
       
  1495 
       
  1496   ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
       
  1497   return monitor->try_enter(THREAD);
       
  1498 }
       
  1499 
       
  1500 
       
  1501 // NOTE: must use heavy weight monitor to handle jni monitor exit
       
  1502 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
       
  1503   TEVENT (jni_exit) ;
       
  1504   if (UseBiasedLocking) {
       
  1505     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1506   }
       
  1507   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1508 
       
  1509   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
       
  1510   // If this thread has locked the object, exit the monitor.  Note:  can't use
       
  1511   // monitor->check(CHECK); must exit even if an exception is pending.
       
  1512   if (monitor->check(THREAD)) {
       
  1513      monitor->exit(THREAD);
       
  1514   }
       
  1515 }
       
  1516 
       
  1517 // complete_exit()/reenter() are used to wait on a nested lock
       
  1518 // i.e. to give up an outer lock completely and then re-enter
       
  1519 // Used when holding nested locks - lock acquisition order: lock1 then lock2
       
  1520 //  1) complete_exit lock1 - saving recursion count
       
  1521 //  2) wait on lock2
       
  1522 //  3) when notified on lock2, unlock lock2
       
  1523 //  4) reenter lock1 with original recursion count
       
  1524 //  5) lock lock2  (see the sketch following reenter() below)
       
  1525 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
       
  1526 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
       
  1527   TEVENT (complete_exit) ;
       
  1528   if (UseBiasedLocking) {
       
  1529     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1530     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1531   }
       
  1532 
       
  1533   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
       
  1534 
       
  1535   return monitor->complete_exit(THREAD);
       
  1536 }
       
  1537 
       
  1538 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
       
  1539 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
       
  1540   TEVENT (reenter) ;
       
  1541   if (UseBiasedLocking) {
       
  1542     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1543     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1544   }
       
  1545 
       
  1546   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
       
  1547 
       
  1548   monitor->reenter(recursion, THREAD);
       
  1549 }
       
  1550 
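// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The complete_exit/reenter
// protocol above, modeled with a toy recursive lock: complete_exit() gives the
// lock up entirely and returns the saved recursion count, and reenter()
// reacquires the lock and restores that count.

#include <mutex>
#include <thread>

class RecursiveLockSketch {
  std::mutex      _m;
  std::thread::id _owner;
  int             _recursions = 0;
 public:
  void enter() {
    if (_owner == std::this_thread::get_id()) { ++_recursions; return; }
    _m.lock();
    _owner = std::this_thread::get_id();
    _recursions = 1;
  }
  void exit() {
    if (--_recursions == 0) { _owner = std::thread::id(); _m.unlock(); }
  }
  int complete_exit() {              // step 1: give up lock1 completely
    int saved = _recursions;
    _recursions = 0;
    _owner = std::thread::id();
    _m.unlock();
    return saved;
  }
  void reenter(int saved) {          // step 4: restore the recursion count
    _m.lock();
    _owner = std::this_thread::get_id();
    _recursions = saved;
  }
};
// -----------------------------------------------------------------------------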
       
  1551 // This exists only as a workaround of dtrace bug 6254741
       
  1552 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
       
  1553   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
       
  1554   return 0;
       
  1555 }
       
  1556 
       
  1557 // NOTE: must use heavy weight monitor to handle wait()
       
  1558 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
       
  1559   if (UseBiasedLocking) {
       
  1560     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1561     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1562   }
       
  1563   if (millis < 0) {
       
  1564     TEVENT (wait - throw IAX) ;
       
  1565     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
       
  1566   }
       
  1567   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
       
  1568   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
       
  1569   monitor->wait(millis, true, THREAD);
       
  1570 
       
  1571   /* This dummy call is in place to get around dtrace bug 6254741.  Once
       
  1572      that's fixed we can uncomment the following line and remove the call */
       
  1573   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
       
  1574   dtrace_waited_probe(monitor, obj, THREAD);
       
  1575 }
       
  1576 
       
  1577 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
       
  1578   if (UseBiasedLocking) {
       
  1579     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1580     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1581   }
       
  1582   if (millis < 0) {
       
  1583     TEVENT (wait - throw IAX) ;
       
  1584     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
       
  1585   }
       
  1586   ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
       
  1587 }
       
  1588 
       
  1589 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
       
  1590  if (UseBiasedLocking) {
       
  1591     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1592     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1593   }
       
  1594 
       
  1595   markOop mark = obj->mark();
       
  1596   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
       
  1597     return;
       
  1598   }
       
  1599   ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
       
  1600 }
       
  1601 
       
  1602 // NOTE: see comment of notify()
       
  1603 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
       
  1604   if (UseBiasedLocking) {
       
  1605     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
       
  1606     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1607   }
       
  1608 
       
  1609   markOop mark = obj->mark();
       
  1610   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
       
  1611     return;
       
  1612   }
       
  1613   ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
       
  1614 }
       
  1615 
       
  1616 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
       
  1617   if (UseBiasedLocking) {
       
  1618     // NOTE: many places throughout the JVM do not expect a safepoint
       
  1619     // to be taken here, in particular most operations on perm gen
       
  1620     // objects. However, we only ever bias Java instances and all of
       
  1621     // the call sites of identity_hash that might revoke biases have
       
  1622     // been checked to make sure they can handle a safepoint. The
       
  1623     // added check of the bias pattern is to avoid useless calls to
       
  1624     // thread-local storage.
       
  1625     if (obj->mark()->has_bias_pattern()) {
       
  1626       // Box and unbox the raw reference just in case we cause a STW safepoint.
       
  1627       Handle hobj (Self, obj) ;
       
  1628       // Relaxing assertion for bug 6320749.
       
  1629       assert (Universe::verify_in_progress() ||
       
  1630               !SafepointSynchronize::is_at_safepoint(),
       
  1631              "biases should not be seen by VM thread here");
       
  1632       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
       
  1633       obj = hobj() ;
       
  1634       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1635     }
       
  1636   }
       
  1637 
       
  1638   // hashCode() is a heap mutator ...
       
  1639   // Relaxing assertion for bug 6320749.
       
  1640   assert (Universe::verify_in_progress() ||
       
  1641           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
       
  1642   assert (Universe::verify_in_progress() ||
       
  1643           Self->is_Java_thread() , "invariant") ;
       
  1644   assert (Universe::verify_in_progress() ||
       
  1645          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
       
  1646 
       
  1647   ObjectMonitor* monitor = NULL;
       
  1648   markOop temp, test;
       
  1649   intptr_t hash;
       
  1650   markOop mark = ReadStableMark (obj);
       
  1651 
       
  1652   // object should remain ineligible for biased locking
       
  1653   assert (!mark->has_bias_pattern(), "invariant") ;
       
  1654 
       
  1655   if (mark->is_neutral()) {
       
  1656     hash = mark->hash();              // this is a normal header
       
  1657     if (hash) {                       // if it has hash, just return it
       
  1658       return hash;
       
  1659     }
       
  1660     hash = get_next_hash(Self, obj);  // allocate a new hash code
       
  1661     temp = mark->copy_set_hash(hash); // merge the hash code into header
       
  1662     // use (machine word version) atomic operation to install the hash
       
  1663     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
       
  1664     if (test == mark) {
       
  1665       return hash;
       
  1666     }
       
  1667     // If the atomic operation failed, we must inflate the header

  1668     // into a heavyweight monitor. We could add more code here

  1669     // for the fast path, but it is not worth the complexity.
       
  1670   } else if (mark->has_monitor()) {
       
  1671     monitor = mark->monitor();
       
  1672     temp = monitor->header();
       
  1673     assert (temp->is_neutral(), "invariant") ;
       
  1674     hash = temp->hash();
       
  1675     if (hash) {
       
  1676       return hash;
       
  1677     }
       
  1678     // Skip to the following code to reduce code size
       
  1679   } else if (Self->is_lock_owned((address)mark->locker())) {
       
  1680     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
       
  1681     assert (temp->is_neutral(), "invariant") ;
       
  1682     hash = temp->hash();              // by current thread, check if the displaced
       
  1683     if (hash) {                       // header contains hash code
       
  1684       return hash;
       
  1685     }
       
  1686     // WARNING:
       
  1687     //   The displaced header is strictly immutable.
       
  1688     // It can NOT be changed in ANY case. So we have

  1689     // to inflate the header into a heavyweight monitor

  1690     // even if the current thread owns the lock. The reason

  1691     // is that the BasicLock (stack slot) will be asynchronously

  1692     // read by other threads during the inflate() function.

  1693     // Any change to the stack may not propagate to other threads
       
  1694     // correctly.
       
  1695   }
       
  1696 
       
  1697   // Inflate the monitor to set hash code
       
  1698   monitor = ObjectSynchronizer::inflate(Self, obj);
       
  1699   // Load displaced header and check it has hash code
       
  1700   mark = monitor->header();
       
  1701   assert (mark->is_neutral(), "invariant") ;
       
  1702   hash = mark->hash();
       
  1703   if (hash == 0) {
       
  1704     hash = get_next_hash(Self, obj);
       
  1705     temp = mark->copy_set_hash(hash); // merge hash code into header
       
  1706     assert (temp->is_neutral(), "invariant") ;
       
  1707     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
       
  1708     if (test != mark) {
       
  1709       // The only update to the header in the monitor (outside GC)
       
  1710       // is to install the hash code. If someone adds a new usage of

  1711       // the displaced header, please update this code.
       
  1712       hash = test->hash();
       
  1713       assert (test->is_neutral(), "invariant") ;
       
  1714       assert (hash != 0, "Trivial unexpected object/monitor header usage.");
       
  1715     }
       
  1716   }
       
  1717   // We finally get the hash
       
  1718   return hash;
       
  1719 }
       
  1720 
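// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The lazy hash-install
// idiom used by FastHashCode() above: merge a candidate hash into the header
// and CAS it in; if the CAS fails, another thread won the race and its hash is
// the one every caller must observe.  The field layout below is an assumption
// for illustration, not the real mark-word layout; real code also guarantees
// the candidate hash is never zero.

#include <atomic>
#include <cstdint>

const int      kSketchHashShift = 8;
const uint64_t kSketchHashMask  = 0x7FFFFFFFull;

inline uint64_t sketch_install_hash(std::atomic<uint64_t>& header,
                                    uint64_t candidate) {
  candidate &= kSketchHashMask;
  for (;;) {
    uint64_t mark = header.load(std::memory_order_acquire);
    uint64_t cur  = (mark >> kSketchHashShift) & kSketchHashMask;
    if (cur != 0) return cur;         // hash already installed -- use it
    uint64_t merged = mark | (candidate << kSketchHashShift);
    if (header.compare_exchange_strong(mark, merged)) {
      return candidate;               // we won the race
    }                                 // else retry; the loop returns the winner
  }
}
// -----------------------------------------------------------------------------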
       
  1721 // Deprecated -- use FastHashCode() instead.
       
  1722 
       
  1723 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
       
  1724   return FastHashCode (Thread::current(), obj()) ;
       
  1725 }
       
  1726 
       
  1727 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
       
  1728                                                    Handle h_obj) {
       
  1729   if (UseBiasedLocking) {
       
  1730     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
       
  1731     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1732   }
       
  1733 
       
  1734   assert(thread == JavaThread::current(), "Can only be called on current thread");
       
  1735   oop obj = h_obj();
       
  1736 
       
  1737   markOop mark = ReadStableMark (obj) ;
       
  1738 
       
  1739   // Uncontended case, header points to stack
       
  1740   if (mark->has_locker()) {
       
  1741     return thread->is_lock_owned((address)mark->locker());
       
  1742   }
       
  1743   // Contended case, header points to ObjectMonitor (tagged pointer)
       
  1744   if (mark->has_monitor()) {
       
  1745     ObjectMonitor* monitor = mark->monitor();
       
  1746     return monitor->is_entered(thread) != 0 ;
       
  1747   }
       
  1748   // Unlocked case, header in place
       
  1749   assert(mark->is_neutral(), "sanity check");
       
  1750   return false;
       
  1751 }
       
  1752 
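// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The three-way dispatch
// above, written out against the classic HotSpot mark-word tag in the low two
// bits (shown for illustration; see markOop.hpp for the real encoding, which
// also covers the biased-locking pattern).

#include <cstdint>

enum MarkStateSketch { kStackLockedS, kNeutralS, kInflatedS, kGCMarkedS };

inline MarkStateSketch sketch_classify_mark(uintptr_t mark) {
  switch (mark & 3) {
    case 0:  return kStackLockedS;  // pointer to a BasicLock on some stack
    case 1:  return kNeutralS;      // unlocked; hash and age in upper bits
    case 2:  return kInflatedS;     // tagged pointer to an ObjectMonitor
    default: return kGCMarkedS;     // reserved for the collector
  }
}
// -----------------------------------------------------------------------------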
       
  1753 // Be aware that this method can revoke the bias of the lock object.
       
  1754 // This method queries the ownership of the lock handle specified by 'h_obj'.
       
  1755 // If the current thread owns the lock, it returns owner_self. If no
       
  1756 // thread owns the lock, it returns owner_none. Otherwise, it will return
       
  1757 // owner_other.
       
  1758 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
       
  1759 (JavaThread *self, Handle h_obj) {
       
  1760   // The caller must beware this method can revoke bias, and
       
  1761   // revocation can result in a safepoint.
       
  1762   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
       
  1763   assert (self->thread_state() != _thread_blocked , "invariant") ;
       
  1764 
       
  1765   // Possible mark states: neutral, biased, stack-locked, inflated
       
  1766 
       
  1767   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
       
  1768     // CASE: biased
       
  1769     BiasedLocking::revoke_and_rebias(h_obj, false, self);
       
  1770     assert(!h_obj->mark()->has_bias_pattern(),
       
  1771            "biases should be revoked by now");
       
  1772   }
       
  1773 
       
  1774   assert(self == JavaThread::current(), "Can only be called on current thread");
       
  1775   oop obj = h_obj();
       
  1776   markOop mark = ReadStableMark (obj) ;
       
  1777 
       
  1778   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
       
  1779   if (mark->has_locker()) {
       
  1780     return self->is_lock_owned((address)mark->locker()) ?
       
  1781       owner_self : owner_other;
       
  1782   }
       
  1783 
       
  1784   // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
       
  1785   // The Object:ObjectMonitor relationship is stable as long as we're
       
  1786   // not at a safepoint.
       
  1787   if (mark->has_monitor()) {
       
  1788     void * owner = mark->monitor()->_owner ;
       
  1789     if (owner == NULL) return owner_none ;
       
  1790     return (owner == self ||
       
  1791             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
       
  1792   }
       
  1793 
       
  1794   // CASE: neutral
       
  1795   assert(mark->is_neutral(), "sanity check");
       
  1796   return owner_none ;           // it's unlocked
       
  1797 }
       
  1798 
       
  1799 // FIXME: jvmti should call this
       
  1800 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
       
  1801   if (UseBiasedLocking) {
       
  1802     if (SafepointSynchronize::is_at_safepoint()) {
       
  1803       BiasedLocking::revoke_at_safepoint(h_obj);
       
  1804     } else {
       
  1805       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
       
  1806     }
       
  1807     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
       
  1808   }
       
  1809 
       
  1810   oop obj = h_obj();
       
  1811   address owner = NULL;
       
  1812 
       
  1813   markOop mark = ReadStableMark (obj) ;
       
  1814 
       
  1815   // Uncontended case, header points to stack
       
  1816   if (mark->has_locker()) {
       
  1817     owner = (address) mark->locker();
       
  1818   }
       
  1819 
       
  1820   // Contended case, header points to ObjectMonitor (tagged pointer)
       
  1821   if (mark->has_monitor()) {
       
  1822     ObjectMonitor* monitor = mark->monitor();
       
  1823     assert(monitor != NULL, "monitor should be non-null");
       
  1824     owner = (address) monitor->owner();
       
  1825   }
       
  1826 
       
  1827   if (owner != NULL) {
       
  1828     return Threads::owning_thread_from_monitor_owner(owner, doLock);
       
  1829   }
       
  1830 
       
  1831   // Unlocked case, header in place
       
  1832   // Cannot have assertion since this object may have been
       
  1833   // locked by another thread when reaching here.
       
  1834   // assert(mark->is_neutral(), "sanity check");
       
  1835 
       
  1836   return NULL;
       
  1837 }
       
  1838 
       
  1839 // Iterate through monitor cache and attempt to release thread's monitors
       
  1840 // Gives up on a particular monitor if an exception occurs, but continues
       
  1841 // the overall iteration, swallowing the exception.
       
  1842 class ReleaseJavaMonitorsClosure: public MonitorClosure {
       
  1843 private:
       
  1844   TRAPS;
       
  1845 
       
  1846 public:
       
  1847   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
       
  1848   void do_monitor(ObjectMonitor* mid) {
       
  1849     if (mid->owner() == THREAD) {
       
  1850       (void)mid->complete_exit(CHECK);
       
  1851     }
       
  1852   }
       
  1853 };
       
  1854 
       
  1855 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
       
  1856 // ignored.  This is meant to be called during JNI thread detach which assumes
       
  1857 // all remaining monitors are heavyweight.  All exceptions are swallowed.
       
  1858 // Scanning the extant monitor list can be time consuming.
       
  1859 // A simple optimization is to add a per-thread flag that indicates a thread
       
  1860 // called jni_monitorenter() during its lifetime.
       
  1861 //
       
  1862 // Instead of No_Safepoint_Verifier it might be cheaper to
       
  1863 // use an idiom of the form:
       
  1864 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
       
  1865 //   <code that must not run at safepoint>
       
  1866 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
       
  1867 // Since the tests are extremely cheap we could leave them enabled
       
  1868 // for normal product builds.
       
  1869 
       
  1870 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
       
  1871   assert(THREAD == JavaThread::current(), "must be current Java thread");
       
  1872   No_Safepoint_Verifier nsv ;
       
  1873   ReleaseJavaMonitorsClosure rjmc(THREAD);
       
  1874   Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
       
  1875   ObjectSynchronizer::monitors_iterate(&rjmc);
       
  1876   Thread::muxRelease(&ListLock);
       
  1877   THREAD->clear_pending_exception();
       
  1878 }
       
  1879 
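// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The _safepoint_counter
// idiom from the comment above release_monitors_owned_by_thread(), assuming a
// counter that is incremented on each safepoint transition and is therefore
// odd only while a safepoint is in progress: sample it around a region and
// verify that it never moved.

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<uint64_t> sketch_safepoint_counter { 0 };   // stand-in counter

template <typename Region>
inline void sketch_assert_no_safepoint(Region region) {
  uint64_t tmp = sketch_safepoint_counter.load(std::memory_order_acquire);
  region();   // <code that must not run at a safepoint>
  uint64_t now = sketch_safepoint_counter.load(std::memory_order_acquire);
  // Unchanged and even <=> no safepoint began, ended, or was in progress.
  assert(((tmp ^ now) | (tmp & 1)) == 0);
}
// -----------------------------------------------------------------------------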
       
  1880 // Visitors ...
       
  1881 
       
  1882 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
       
  1883   ObjectMonitor* block = gBlockList;
       
  1884   ObjectMonitor* mid;
       
  1885   while (block) {
       
  1886     assert(block->object() == CHAINMARKER, "must be a block header");
       
  1887     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
       
  1888       mid = block + i;
       
  1889       oop object = (oop) mid->object();
       
  1890       if (object != NULL) {
       
  1891         closure->do_monitor(mid);
       
  1892       }
       
  1893     }
       
  1894     block = (ObjectMonitor*) block->FreeNext;
       
  1895   }
       
  1896 }
       
  1897 
       
  1898 void ObjectSynchronizer::oops_do(OopClosure* f) {
       
  1899   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
       
  1900   for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
       
  1901     assert(block->object() == CHAINMARKER, "must be a block header");
       
  1902     for (int i = 1; i < _BLOCKSIZE; i++) {
       
  1903       ObjectMonitor* mid = &block[i];
       
  1904       if (mid->object() != NULL) {
       
  1905         f->do_oop((oop*)mid->object_addr());
       
  1906       }
       
  1907     }
       
  1908   }
       
  1909 }
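// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The block layout the two
// walkers above rely on: monitors are allocated in fixed-size blocks whose
// element 0 is a chain header (its object field holds a sentinel), and blocks
// are chained through that header's FreeNext.  The block size and sentinel
// value here are assumptions standing in for _BLOCKSIZE and CHAINMARKER.

const int   kSketchBlockSize   = 128;
void* const kSketchChainMarker = (void*) 1;

struct MonitorSketch { void* object; MonitorSketch* free_next; };

template <typename Fn>
inline void sketch_for_each_monitor(MonitorSketch* block_list, Fn visit) {
  for (MonitorSketch* block = block_list; block != nullptr;
       block = block->free_next) {            // the header chains the blocks
    // Slots 1 .. kSketchBlockSize-1 are real monitors; slot 0 never is.
    for (int i = 1; i < kSketchBlockSize; i++) {
      if (block[i].object != nullptr) visit(&block[i]);
    }
  }
}
// -----------------------------------------------------------------------------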
       
  1910 
  1331 
  1911 // Deflate_idle_monitors() is called at all safepoints, immediately
  1332 // Deflate_idle_monitors() is called at all safepoints, immediately
  1912 // after all mutators are stopped, but before any objects have moved.
  1333 // after all mutators are stopped, but before any objects have moved.
  1913 // It traverses the list of known monitors, deflating where possible.
  1334 // It traverses the list of known monitors, deflating where possible.
  1914 // The scavenged monitors are returned to the monitor free list.
  1335 // The scavenged monitors are returned to the monitor free list.
  1934 // Perversely, the heap size -- and thus the STW safepoint rate --
  1355 // Perversely, the heap size -- and thus the STW safepoint rate --
  1935 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
  1356 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
  1936 // which in turn can mean large(r) numbers of objectmonitors in circulation.
  1357 // which in turn can mean large(r) numbers of objectmonitors in circulation.
  1937 // This is an unfortunate aspect of this design.
  1358 // This is an unfortunate aspect of this design.
  1938 //
  1359 //
  1939 // Another refinement would be to refrain from calling deflate_idle_monitors()
  1360 
  1940 // except at stop-the-world points associated with garbage collections.
  1361 enum ManifestConstants {
  1941 //
  1362     ClearResponsibleAtSTW   = 0,
  1942 // An even better solution would be to deflate on-the-fly, aggressively,
  1363     MaximumRecheckInterval  = 1000
  1943 // at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
  1364 } ;
  1944 
       
  1945 
  1365 
  1946 // Deflate a single monitor if not in use
  1366 // Deflate a single monitor if not in use
  1947 // Return true if deflated, false if in use
  1367 // Return true if deflated, false if in use
  1948 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1368 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1949                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  1369                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  2086 
  1506 
  2087   MonitorFreeCount += nScavenged;
  1507   MonitorFreeCount += nScavenged;
  2088 
  1508 
  2089   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
  1509   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
  2090 
  1510 
  2091   if (Knob_Verbose) {
  1511   if (ObjectMonitor::Knob_Verbose) {
  2092     ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
  1512     ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
  2093         nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
  1513         nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
  2094         MonitorPopulation, MonitorFreeCount) ;
  1514         MonitorPopulation, MonitorFreeCount) ;
  2095     ::fflush(stdout) ;
  1515     ::fflush(stdout) ;
  2096   }
  1516   }
  2105      FreeTail->FreeNext = gFreeList ;
  1525      FreeTail->FreeNext = gFreeList ;
  2106      gFreeList = FreeHead ;
  1526      gFreeList = FreeHead ;
  2107   }
  1527   }
  2108   Thread::muxRelease (&ListLock) ;
  1528   Thread::muxRelease (&ListLock) ;
  2109 
  1529 
  2110   if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
  1530   if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
  2111   if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);
  1531   if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
  2112 
  1532 
  2113   // TODO: Add objectMonitor leak detection.
  1533   // TODO: Add objectMonitor leak detection.
  2114   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  1534   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  2115   GVars.stwRandom = os::random() ;
  1535   GVars.stwRandom = os::random() ;
  2116   GVars.stwCycle ++ ;
  1536   GVars.stwCycle ++ ;
  2117 }
  1537 }
  2118 
  1538 
  2119 // A macro is used below because there may already be a pending
  1539 // Monitor cleanup on JavaThread::exit
  2120 // exception which should not abort the execution of the routines
  1540 
  2121 // which use this (which is why we don't put this into check_slow and
  1541 // Iterate through monitor cache and attempt to release thread's monitors
  2122 // call it with a CHECK argument).
  1542 // Gives up on a particular monitor if an exception occurs, but continues
  2123 
  1543 // the overall iteration, swallowing the exception.
  2124 #define CHECK_OWNER()                                                             \
  1544 class ReleaseJavaMonitorsClosure: public MonitorClosure {
  2125   do {                                                                            \
  1545 private:
  2126     if (THREAD != _owner) {                                                       \
  1546   TRAPS;
  2127       if (THREAD->is_lock_owned((address) _owner)) {                              \
  1547 
  2128         _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
  1548 public:
  2129         _recursions = 0;                                                          \
  1549   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  2130         OwnerIsThread = 1 ;                                                       \
  1550   void do_monitor(ObjectMonitor* mid) {
  2131       } else {                                                                    \
  1551     if (mid->owner() == THREAD) {
  2132         TEVENT (Throw IMSX) ;                                                     \
  1552       (void)mid->complete_exit(CHECK);
  2133         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
  1553     }
  2134       }                                                                           \
  1554   }
  2135     }                                                                             \
  1555 };
  2136   } while (false)
  1556 
  2137 
  1557 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
  2138 // TODO-FIXME: eliminate ObjectWaiters.  Replace this visitor/enumerator
  1558 // ignored.  This is meant to be called during JNI thread detach which assumes
  2139 // interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
  1559 // all remaining monitors are heavyweight.  All exceptions are swallowed.
  2140 
  1560 // Scanning the extant monitor list can be time consuming.
  2141 ObjectWaiter* ObjectMonitor::first_waiter() {
  1561 // A simple optimization is to add a per-thread flag that indicates a thread
  2142   return _WaitSet;
  1562 // called jni_monitorenter() during its lifetime.
  2143 }
  1563 //
  2144 
  1564 // Instead of No_Safepoint_Verifier it might be cheaper to
  2145 ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
  1565 // use an idiom of the form:
  2146   return o->_next;
  1566 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
  2147 }
  1567 //   <code that must not run at safepoint>
  2148 
  1568 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
  2149 Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
  1569 // Since the tests are extremely cheap we could leave them enabled
  2150   return o->_thread;
  1570 // for normal product builds.
  2151 }
  1571 
  2152 
  1572 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  2153 // Initialize the monitor; except for the semaphore, all other fields
  1573   assert(THREAD == JavaThread::current(), "must be current Java thread");
  2154 // are simple integers or pointers
  1574   No_Safepoint_Verifier nsv ;
  2155 ObjectMonitor::ObjectMonitor() {
  1575   ReleaseJavaMonitorsClosure rjmc(THREAD);
  2156   _header       = NULL;
  1576   Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  2157   _count        = 0;
  1577   ObjectSynchronizer::monitors_iterate(&rjmc);
  2158   _waiters      = 0,
  1578   Thread::muxRelease(&ListLock);
  2159   _recursions   = 0;
  1579   THREAD->clear_pending_exception();
  2160   _object       = NULL;
  1580 }
  2161   _owner        = NULL;
       
  2162   _WaitSet      = NULL;
       
  2163   _WaitSetLock  = 0 ;
       
  2164   _Responsible  = NULL ;
       
  2165   _succ         = NULL ;
       
  2166   _cxq          = NULL ;
       
  2167   FreeNext      = NULL ;
       
  2168   _EntryList    = NULL ;
       
  2169   _SpinFreq     = 0 ;
       
  2170   _SpinClock    = 0 ;
       
  2171   OwnerIsThread = 0 ;
       
  2172 }
       
  2173 
       
  2174 ObjectMonitor::~ObjectMonitor() {
       
  2175    // TODO: Add asserts ...
       
  2176    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
       
  2177    // _count == 0 _EntryList  == NULL etc
       
  2178 }
       
  2179 
       
  2180 intptr_t ObjectMonitor::is_busy() const {
       
  2181   // TODO-FIXME: merge _count and _waiters.
       
  2182   // TODO-FIXME: assert _owner == null implies _recursions = 0
       
  2183   // TODO-FIXME: assert _WaitSet != null implies _count > 0
       
  2184   return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
       
  2185 }
       
  2186 
       
  2187 void ObjectMonitor::Recycle () {
       
  2188   // TODO: add stronger asserts ...
       
  2189   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
       
  2190   // _count == 0 EntryList  == NULL
       
  2191   // _recursions == 0 _WaitSet == NULL
       
  2192   // TODO: assert (is_busy()|_recursions) == 0
       
  2193   _succ          = NULL ;
       
  2194   _EntryList     = NULL ;
       
  2195   _cxq           = NULL ;
       
  2196   _WaitSet       = NULL ;
       
  2197   _recursions    = 0 ;
       
  2198   _SpinFreq      = 0 ;
       
  2199   _SpinClock     = 0 ;
       
  2200   OwnerIsThread  = 0 ;
       
  2201 }
       
  2202 
       
  2203 // WaitSet management ...
       
  2204 
       
  2205 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
       
  2206   assert(node != NULL, "should not dequeue NULL node");
       
  2207   assert(node->_prev == NULL, "node already in list");
       
  2208   assert(node->_next == NULL, "node already in list");
       
  2209   // put node at end of queue (circular doubly linked list)
       
  2210   if (_WaitSet == NULL) {
       
  2211     _WaitSet = node;
       
  2212     node->_prev = node;
       
  2213     node->_next = node;
       
  2214   } else {
       
  2215     ObjectWaiter* head = _WaitSet ;
       
  2216     ObjectWaiter* tail = head->_prev;
       
  2217     assert(tail->_next == head, "invariant check");
       
  2218     tail->_next = node;
       
  2219     head->_prev = node;
       
  2220     node->_next = head;
       
  2221     node->_prev = tail;
       
  2222   }
       
  2223 }
       
  2224 
       
  2225 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
       
  2226   // dequeue the very first waiter
       
  2227   ObjectWaiter* waiter = _WaitSet;
       
  2228   if (waiter) {
       
  2229     DequeueSpecificWaiter(waiter);
       
  2230   }
       
  2231   return waiter;
       
  2232 }
       
  2233 
       
  2234 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
       
  2235   assert(node != NULL, "should not dequeue NULL node");
       
  2236   assert(node->_prev != NULL, "node already removed from list");
       
  2237   assert(node->_next != NULL, "node already removed from list");
       
  2238   // when the waiter has woken up because of interrupt,
       
  2239   // timeout or other spurious wake-up, dequeue the
       
  2240   // waiter from waiting list
       
  2241   ObjectWaiter* next = node->_next;
       
  2242   if (next == node) {
       
  2243     assert(node->_prev == node, "invariant check");
       
  2244     _WaitSet = NULL;
       
  2245   } else {
       
  2246     ObjectWaiter* prev = node->_prev;
       
  2247     assert(prev->_next == node, "invariant check");
       
  2248     assert(next->_prev == node, "invariant check");
       
  2249     next->_prev = prev;
       
  2250     prev->_next = next;
       
  2251     if (_WaitSet == node) {
       
  2252       _WaitSet = next;
       
  2253     }
       
  2254   }
       
  2255   node->_next = NULL;
       
  2256   node->_prev = NULL;
       
  2257 }
       
  2258 
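// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  A standalone model of the
// circular doubly-linked WaitSet managed above, showing that AddWaiter appends
// at the tail (head->prev) and DequeueWaiter pops the head, i.e. FIFO order.

#include <cassert>

struct WaiterSketch { WaiterSketch* next = nullptr; WaiterSketch* prev = nullptr; };

struct WaitSetSketch {
  WaiterSketch* head = nullptr;
  void add(WaiterSketch* n) {
    if (head == nullptr) { head = n; n->prev = n->next = n; return; }
    WaiterSketch* tail = head->prev;            // tail is one hop from head
    tail->next = n; n->prev = tail; n->next = head; head->prev = n;
  }
  WaiterSketch* dequeue() {
    WaiterSketch* n = head;
    if (n == nullptr) return nullptr;
    if (n->next == n) { head = nullptr; }       // last waiter leaves the set
    else {
      n->prev->next = n->next; n->next->prev = n->prev; head = n->next;
    }
    n->next = n->prev = nullptr;
    return n;
  }
};

static void waitset_demo() {
  WaiterSketch a, b; WaitSetSketch ws;
  ws.add(&a); ws.add(&b);
  assert(ws.dequeue() == &a);                   // oldest waiter first
  assert(ws.dequeue() == &b);
  assert(ws.dequeue() == nullptr);
}
// -----------------------------------------------------------------------------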
       
  2259 static char * kvGet (char * kvList, const char * Key) {
       
  2260     if (kvList == NULL) return NULL ;
       
  2261     size_t n = strlen (Key) ;
       
  2262     char * Search ;
       
  2263     for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
       
  2264         if (strncmp (Search, Key, n) == 0) {
       
  2265             if (Search[n] == '=') return Search + n + 1 ;
       
  2266             if (Search[n] == 0)   return (char *) "1" ;
       
  2267         }
       
  2268     }
       
  2269     return NULL ;
       
  2270 }
       
  2271 
       
  2272 static int kvGetInt (char * kvList, const char * Key, int Default) {
       
  2273     char * v = kvGet (kvList, Key) ;
       
  2274     int rslt = v ? ::strtol (v, NULL, 0) : Default ;
       
  2275     if (Knob_ReportSettings && v != NULL) {
       
  2276         ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
       
  2277         ::fflush (stdout) ;
       
  2278     }
       
  2279     return rslt ;
       
  2280 }
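// -----------------------------------------------------------------------------
// [Illustrative usage -- not part of this change]  The kvList format that
// kvGet()/kvGetInt() above parse: a run of NUL-separated "Key=Value" entries
// (a bare "Key" means "1"), terminated by an empty entry.  The knob names used
// here are made up for the example.

#include <cstdio>

static void kv_demo() {
  // In memory: "SpinLimit=5000\0Verbose\0\0" -- the literal's implicit
  // trailing NUL supplies the empty entry that terminates the list.
  static char knobs[] = "SpinLimit=5000\0Verbose\0";
  ::printf("SpinLimit -> %s\n", kvGet(knobs, "SpinLimit"));      // "5000"
  ::printf("Verbose   -> %s\n", kvGet(knobs, "Verbose"));        // "1"
  ::printf("Missing   -> %d\n", kvGetInt(knobs, "Missing", 42)); // default 42
}
// -----------------------------------------------------------------------------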
       
  2281 
       
  2282 // By convention we unlink a contending thread from EntryList|cxq immediately
       
  2283 // after the thread acquires the lock in ::enter().  Equally, we could defer
       
  2284 // unlinking the thread until ::exit()-time.
       
  2285 
       
  2286 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
       
  2287 {
       
  2288     assert (_owner == Self, "invariant") ;
       
  2289     assert (SelfNode->_thread == Self, "invariant") ;
       
  2290 
       
  2291     if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
       
  2292         // Normal case: remove Self from the DLL EntryList .
       
  2293         // This is a constant-time operation.
       
  2294         ObjectWaiter * nxt = SelfNode->_next ;
       
  2295         ObjectWaiter * prv = SelfNode->_prev ;
       
  2296         if (nxt != NULL) nxt->_prev = prv ;
       
  2297         if (prv != NULL) prv->_next = nxt ;
       
  2298         if (SelfNode == _EntryList ) _EntryList = nxt ;
       
  2299         assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  2300         assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  2301         TEVENT (Unlink from EntryList) ;
       
  2302     } else {
       
  2303         guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
       
  2304         // Inopportune interleaving -- Self is still on the cxq.
       
  2305         // This usually means the enqueue of self raced an exiting thread.
       
  2306         // Normally we'll find Self near the front of the cxq, so
       
  2307         // dequeueing is typically fast.  If need be we can accelerate
       
  2308         // this with some MCS/CLH-like bidirectional list hints and advisory
       
  2309         // back-links so dequeueing from the interior will normally operate
       
  2310         // in constant-time.
       
  2311         // Dequeue Self from either the head (with CAS) or from the interior
       
  2312         // with a linear-time scan and normal non-atomic memory operations.
       
  2313         // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
       
  2314         // and then unlink Self from EntryList.  We have to drain eventually,
       
  2315         // so it might as well be now.
       
  2316 
       
  2317         ObjectWaiter * v = _cxq ;
       
  2318         assert (v != NULL, "invariant") ;
       
  2319         if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
       
  2320             // The CAS above can fail from interference IFF a "RAT" arrived.
       
  2321             // In that case Self must be in the interior and can no longer be
       
  2322             // at the head of cxq.
       
  2323             if (v == SelfNode) {
       
  2324                 assert (_cxq != v, "invariant") ;
       
  2325                 v = _cxq ;          // CAS above failed - start scan at head of list
       
  2326             }
       
  2327             ObjectWaiter * p ;
       
  2328             ObjectWaiter * q = NULL ;
       
  2329             for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
       
  2330                 q = p ;
       
  2331                 assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
       
  2332             }
       
  2333             assert (v != SelfNode,  "invariant") ;
       
  2334             assert (p == SelfNode,  "Node not found on cxq") ;
       
  2335             assert (p != _cxq,      "invariant") ;
       
  2336             assert (q != NULL,      "invariant") ;
       
  2337             assert (q->_next == p,  "invariant") ;
       
  2338             q->_next = p->_next ;
       
  2339         }
       
  2340         TEVENT (Unlink from cxq) ;
       
  2341     }
       
  2342 
       
  2343     // Diagnostic hygiene ...
       
  2344     SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
       
  2345     SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
       
  2346     SelfNode->TState = ObjectWaiter::TS_RUN ;
       
  2347 }
       
  2348 
       
  2349 // Caveat: TryLock() is not necessarily serializing if it returns failure.
       
  2350 // Callers must compensate as needed.
       
  2351 
       
  2352 int ObjectMonitor::TryLock (Thread * Self) {
       
  2353    for (;;) {
       
  2354       void * own = _owner ;
       
  2355       if (own != NULL) return 0 ;
       
  2356       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
       
  2357          // Either guarantee _recursions == 0 or set _recursions = 0.
       
  2358          assert (_recursions == 0, "invariant") ;
       
  2359          assert (_owner == Self, "invariant") ;
       
  2360          // CONSIDER: set or assert that OwnerIsThread == 1
       
  2361          return 1 ;
       
  2362       }
       
  2363       // The lock had been free momentarily, but we lost the race to the lock.
       
  2364       // Interference -- the CAS failed.
       
  2365       // We can either return -1 or retry.
       
  2366       // Retry doesn't make as much sense because the lock was just acquired.
       
  2367       if (true) return -1 ;
       
  2368    }
       
  2369 }
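// -----------------------------------------------------------------------------
// [Illustrative sketch -- not part of this change]  The same three-way trylock
// shape as TryLock() above, using std::atomic: 0 = the lock was held,
// 1 = we acquired it, -1 = it was momentarily free but we lost the CAS race.

#include <atomic>

inline int sketch_try_lock(std::atomic<void*>& owner, void* self) {
  void* own = owner.load(std::memory_order_relaxed);
  if (own != nullptr) return 0;        // busy -- don't even attempt the CAS
  void* expected = nullptr;
  if (owner.compare_exchange_strong(expected, self,
                                    std::memory_order_acquire)) {
    return 1;                          // we own the lock
  }
  return -1;                           // interference -- the CAS failed
}
// -----------------------------------------------------------------------------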
       
  2370 
       
  2371 // NotRunnable() -- informed spinning
       
  2372 //
       
  2373 // Don't bother spinning if the owner is not eligible to drop the lock.
       
  2374 // Peek at the owner's schedctl.sc_state and Thread._thread_values and
       
  2375 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
       
  2376 // The thread must be runnable in order to drop the lock in timely fashion.
       
  2377 // If the _owner is not runnable then spinning will not likely be
       
  2378 // successful (profitable).
       
  2379 //
       
  2380 // Beware -- the thread referenced by _owner could have died
       
  2381 // so a simple fetch from _owner->_thread_state might trap.
       
  2382 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
       
  2383 // Because of the lifecycle issues the schedctl and _thread_state values
       
  2384 // observed by NotRunnable() might be garbage.  NotRunnable must
       
  2385 // tolerate this and consider the observed _thread_state value
       
  2386 // as advisory.
       
  2387 //
       
  2388 // Beware too, that _owner is sometimes a BasicLock address and sometimes
       
  2389 // a thread pointer.  We differentiate the two cases with OwnerIsThread.
       
  2390 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
       
  2391 // with the LSB of _owner.  Another option would be to probabilistically probe
       
  2392 // the putative _owner->TypeTag value.
       
  2393 //
       
  2394 // Checking _thread_state isn't perfect.  Even if the thread is
       
  2395 // in_java it might be blocked on a page-fault or have been preempted
       
  2396 // and sitting on a ready/dispatch queue.  _thread state in conjunction
       
  2397 // with schedctl.sc_state gives us a good picture of what the
       
  2398 // thread is doing, however.
       
  2399 //
       
  2400 // TODO: check schedctl.sc_state.
       
  2401 // We'll need to use SafeFetch32() to read from the schedctl block.
       
  2402 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
       
  2403 //
       
  2404 // The return value from NotRunnable() is *advisory* -- the
       
  2405 // result is based on sampling and is not necessarily coherent.
       
  2406 // The caller must tolerate false-negative and false-positive errors.
       
  2407 // Spinning, in general, is probabilistic anyway.
       
  2408 
       
  2409 
       
  2410 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
       
  2411     // Check either OwnerIsThread or ox->TypeTag == 2BAD.
       
  2412     if (!OwnerIsThread) return 0 ;
       
  2413 
       
  2414     if (ox == NULL) return 0 ;
       
  2415 
       
  2416     // Avoid transitive spinning ...
       
  2417     // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
       
  2418     // Immediately after T1 acquires L it's possible that T2, also
       
  2419     // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
       
  2420     // This occurs transiently after T1 acquired L but before
       
  2421     // T1 managed to clear T1.Stalled.  T2 does not need to abort
       
  2422     // its spin in this circumstance.
       
  2423     intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
       
  2424 
       
  2425     if (BlockedOn == 1) return 1 ;
       
  2426     if (BlockedOn != 0) {
       
  2427       return BlockedOn != intptr_t(this) && _owner == ox ;
       
  2428     }
       
  2429 
       
  2430     assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
       
  2431     int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
       
  2432     // consider also: jst != _thread_in_Java -- but that's overspecific.
       
  2433     return jst == _thread_blocked || jst == _thread_in_native ;
       
  2434 }
       
  2435 
       
  2436 
       
  2437 // Adaptive spin-then-block - rational spinning
       
  2438 //
       
  2439 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
       
  2440 // algorithm.  On high order SMP systems it would be better to start with
       
  2441 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
       
  2442 // a contending thread could enqueue itself on the cxq and then spin locally
       
  2443 // on a thread-specific variable such as its ParkEvent._Event flag.
       
  2444 // That's left as an exercise for the reader.  Note that global spinning is
       
  2445 // not problematic on Niagara, as the L2$ serves the interconnect and has both
       
  2446 // low latency and massive bandwidth.
       
  2447 //
       
  2448 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
       
  2449 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
       
  2450 // (duration) or we can fix the count at approximately the duration of
       
  2451 // a context switch and vary the frequency.   Of course we could also
       
  2452 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
       
  2453 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
       
  2454 //
       
  2455 // This implementation varies the duration "D", where D varies with
       
  2456 // the success rate of recent spin attempts. (D is capped at approximately
       
  2457 // length of a round-trip context switch).  The success rate for recent
       
  2458 // spin attempts is a good predictor of the success rate of future spin
       
  2459 // attempts.  The mechanism adapts automatically to varying critical
       
  2460 // section length (lock modality), system load and degree of parallelism.
       
  2461 // D is maintained per-monitor in _SpinDuration and is initialized
       
  2462 // optimistically.  Spin frequency is fixed at 100%.
       
  2463 //
       
  2464 // Note that _SpinDuration is volatile, but we update it without locks
       
  2465 // or atomics.  The code is designed so that _SpinDuration stays within
       
  2466 // a reasonable range even in the presence of races.  The arithmetic
       
  2467 // operations on _SpinDuration are closed over the domain of legal values,
       
   2468 // so at worst a race will install an older but still legal value.
       
  2469 // At the very worst this introduces some apparent non-determinism.
       
  2470 // We might spin when we shouldn't or vice-versa, but since the spin
       
   2471 // counts are relatively short, even in the worst case, the effect is harmless.
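        // (Concretely: a racing increment reads a stale x < Knob_SpinLimit and
        // stores x + Knob_Bonus, while the decrement path clamps at zero, so any
        // interleaving stores a value within [0, Knob_SpinLimit + Knob_Bonus].)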
       
  2472 //
       
   2473 // Care must be taken that a low "D" value does not become

   2474 // an absorbing state.  Transient spinning failures -- when spinning
       
  2475 // is overall profitable -- should not cause the system to converge
       
  2476 // on low "D" values.  We want spinning to be stable and predictable
       
  2477 // and fairly responsive to change and at the same time we don't want
       
  2478 // it to oscillate, become metastable, be "too" non-deterministic,
       
  2479 // or converge on or enter undesirable stable absorbing states.
       
  2480 //
       
  2481 // We implement a feedback-based control system -- using past behavior
       
  2482 // to predict future behavior.  We face two issues: (a) if the
       
  2483 // input signal is random then the spin predictor won't provide optimal
       
  2484 // results, and (b) if the signal frequency is too high then the control
       
  2485 // system, which has some natural response lag, will "chase" the signal.
       
  2486 // (b) can arise from multimodal lock hold times.  Transient preemption
       
  2487 // can also result in apparent bimodal lock hold times.
       
  2488 // Although sub-optimal, neither condition is particularly harmful, as
       
  2489 // in the worst-case we'll spin when we shouldn't or vice-versa.
       
  2490 // The maximum spin duration is rather short so the failure modes aren't bad.
       
   2491 // To be conservative, I've tuned the gain in the system to bias toward
       
  2492 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
       
  2493 // "rings" or oscillates between spinning and not spinning.  This happens
       
  2494 // when spinning is just on the cusp of profitability, however, so the
       
  2495 // situation is not dire.  The state is benign -- there's no need to add
       
  2496 // hysteresis control to damp the transition rate between spinning and
       
  2497 // not spinning.
       
  2498 //
       
  2499 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
       
  2500 //
       
  2501 // Spin-then-block strategies ...
       
  2502 //
       
  2503 // Thoughts on ways to improve spinning :
       
  2504 //
       
  2505 // *  Periodically call {psr_}getloadavg() while spinning, and
       
  2506 //    permit unbounded spinning if the load average is <
       
  2507 //    the number of processors.  Beware, however, that getloadavg()
       
   2508 //    is exceptionally fast on Solaris (about 1/10 the cost of a full

   2509 //    spin cycle), but quite expensive on Linux.  Beware also that
       
  2510 //    multiple JVMs could "ring" or oscillate in a feedback loop.
       
  2511 //    Sufficient damping would solve that problem.
       
  2512 //
       
  2513 // *  We currently use spin loops with iteration counters to approximate
       
  2514 //    spinning for some interval.  Given the availability of high-precision
       
  2515 //    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
       
   2516 //    someday reimplement the spin loops to be duration-based instead of iteration-based (see the sketch following this list).
       
  2517 //
       
  2518 // *  Don't spin if there are more than N = (CPUs/2) threads
       
  2519 //        currently spinning on the monitor (or globally).
       
  2520 //    That is, limit the number of concurrent spinners.
       
  2521 //    We might also limit the # of spinners in the JVM, globally.
       
  2522 //
       
  2523 // *  If a spinning thread observes _owner change hands it should
       
  2524 //    abort the spin (and park immediately) or at least debit
       
  2525 //    the spin counter by a large "penalty".
       
  2526 //
       
  2527 // *  Classically, the spin count is either K*(CPUs-1) or is a
       
  2528 //        simple constant that approximates the length of a context switch.
       
  2529 //    We currently use a value -- computed by a special utility -- that
       
  2530 //    approximates round-trip context switch times.
       
  2531 //
       
  2532 // *  Normally schedctl_start()/_stop() is used to advise the kernel
       
  2533 //    to avoid preempting threads that are running in short, bounded
       
  2534 //    critical sections.  We could use the schedctl hooks in an inverted
       
  2535 //    sense -- spinners would set the nopreempt flag, but poll the preempt
       
  2536 //    pending flag.  If a spinner observed a pending preemption it'd immediately
       
  2537 //    abort the spin and park.   As such, the schedctl service acts as
       
  2538 //    a preemption warning mechanism.
       
  2539 //
       
  2540 // *  In lieu of spinning, if the system is running below saturation
       
  2541 //    (that is, loadavg() << #cpus), we can instead suppress futile
       
  2542 //    wakeup throttling, or even wake more than one successor at exit-time.
       
  2543 //    The net effect is largely equivalent to spinning.  In both cases,
       
  2544 //    contending threads go ONPROC and opportunistically attempt to acquire
       
  2545 //    the lock, decreasing lock handover latency at the expense of wasted
       
  2546 //    cycles and context switching.
       
  2547 //
       
   2548 // *  We might spin less after we've parked, as the thread will
       
  2549 //    have less $ and TLB affinity with the processor.
       
  2550 //    Likewise, we might spin less if we come ONPROC on a different
       
  2551 //    processor or after a long period (>> rechose_interval).
       
  2552 //
       
  2553 // *  A table-driven state machine similar to Solaris' dispadmin scheduling
       
  2554 //    tables might be a better design.  Instead of encoding information in
       
  2555 //    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
       
  2556 //    discrete states.   Success or failure during a spin would drive
       
  2557 //    state transitions, and each state node would contain a spin count.
       
  2558 //
       
  2559 // *  If the processor is operating in a mode intended to conserve power
       
  2560 //    (such as Intel's SpeedStep) or to reduce thermal output (thermal
       
  2561 //    step-down mode) then the Java synchronization subsystem should
       
  2562 //    forgo spinning.
       
  2563 //
       
  2564 // *  The minimum spin duration should be approximately the worst-case
       
  2565 //    store propagation latency on the platform.  That is, the time
       
  2566 //    it takes a store on CPU A to become visible on CPU B, where A and
       
  2567 //    B are "distant".
       
  2568 //
       
  2569 // *  We might want to factor a thread's priority in the spin policy.
       
  2570 //    Threads with a higher priority might spin for slightly longer.
       
  2571 //    Similarly, if we use back-off in the TATAS loop, lower priority
       
  2572 //    threads might back-off longer.  We don't currently use a
       
  2573 //    thread's priority when placing it on the entry queue.  We may
       
  2574 //    want to consider doing so in future releases.
       
  2575 //
       
  2576 // *  We might transiently drop a thread's scheduling priority while it spins.
       
  2577 //    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
       
  2578 //    would suffice.  We could even consider letting the thread spin indefinitely at
       
  2579 //    a depressed or "idle" priority.  This brings up fairness issues, however --
       
   2580 //    in a saturated system a thread with a reduced priority could languish
       
  2581 //    for extended periods on the ready queue.
       
  2582 //
       
  2583 // *  While spinning try to use the otherwise wasted time to help the VM make
       
  2584 //    progress:
       
  2585 //
       
  2586 //    -- YieldTo() the owner, if the owner is OFFPROC but ready
       
   2587 //       Donate our remaining quantum directly to the ready thread.
       
  2588 //       This helps "push" the lock owner through the critical section.
       
  2589 //       It also tends to improve affinity/locality as the lock
       
  2590 //       "migrates" less frequently between CPUs.
       
  2591 //    -- Walk our own stack in anticipation of blocking.  Memoize the roots.
       
   2592 //    -- Perform strand checking for other threads.  Unpark potential strandees.
       
  2593 //    -- Help GC: trace or mark -- this would need to be a bounded unit of work.
       
  2594 //       Unfortunately this will pollute our $ and TLBs.  Recall that we
       
  2595 //       spin to avoid context switching -- context switching has an
       
  2596 //       immediate cost in latency, a disruptive cost to other strands on a CMT
       
  2597 //       processor, and an amortized cost because of the D$ and TLB cache
       
  2598 //       reload transient when the thread comes back ONPROC and repopulates
       
  2599 //       $s and TLBs.
       
  2600 //    -- call getloadavg() to see if the system is saturated.  It'd probably
       
   2601 //       make sense to call getloadavg() halfway through the spin.

   2602 //       If the system isn't at full capacity then we'd simply reset

   2603 //       the spin counter and extend the spin attempt.
       
  2604 //    -- Doug points out that we should use the same "helping" policy
       
  2605 //       in thread.yield().
       
  2606 //
       
  2607 // *  Try MONITOR-MWAIT on systems that support those instructions.
       
  2608 //
       
  2609 // *  The spin statistics that drive spin decisions & frequency are
       
  2610 //    maintained in the objectmonitor structure so if we deflate and reinflate
       
  2611 //    we lose spin state.  In practice this is not usually a concern
       
  2612 //    as the default spin state after inflation is aggressive (optimistic)
       
  2613 //    and tends toward spinning.  So in the worst case for a lock where
       
  2614 //    spinning is not profitable we may spin unnecessarily for a brief
       
  2615 //    period.  But then again, if a lock is contended it'll tend not to deflate
       
  2616 //    in the first place.
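        //
        // As a sketch of the duration-based spin loop suggested in the list above --
        // illustrative only: the TrySpin_VaryDurationTimed name is hypothetical, and
        // os::javaTimeNanos() stands in for any monotonic high-resolution time source:
        //
        //     int ObjectMonitor::TrySpin_VaryDurationTimed (Thread * Self, jlong SpinNanos) {
        //        const jlong Deadline = os::javaTimeNanos() + SpinNanos ;
        //        while (os::javaTimeNanos() < Deadline) {
        //           if (TryLock (Self) > 0) return 1 ;
        //           SpinPause () ;
        //        }
        //        return 0 ;      // spin failed -- the caller should enqueue and park
        //     }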
       
  2617 
       
  2618 
       
  2619 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
       
  2620 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
       
  2621 
       
  2622 // Spinning: Fixed frequency (100%), vary duration
       
  2623 
       
  2624 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
       
  2625 
       
  2626     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
       
  2627     int ctr = Knob_FixedSpin ;
       
  2628     if (ctr != 0) {
       
  2629         while (--ctr >= 0) {
       
  2630             if (TryLock (Self) > 0) return 1 ;
       
  2631             SpinPause () ;
       
  2632         }
       
  2633         return 0 ;
       
  2634     }
       
  2635 
       
  2636     for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
       
  2637       if (TryLock(Self) > 0) {
       
  2638         // Increase _SpinDuration ...
       
  2639         // Note that we don't clamp SpinDuration precisely at SpinLimit.
       
   2640         // Raising _SpinDuration to the poverty line is key.
       
  2641         int x = _SpinDuration ;
       
  2642         if (x < Knob_SpinLimit) {
       
  2643            if (x < Knob_Poverty) x = Knob_Poverty ;
       
  2644            _SpinDuration = x + Knob_BonusB ;
       
  2645         }
       
  2646         return 1 ;
       
  2647       }
       
  2648       SpinPause () ;
       
  2649     }
       
  2650 
       
  2651     // Admission control - verify preconditions for spinning
       
  2652     //
       
  2653     // We always spin a little bit, just to prevent _SpinDuration == 0 from
       
  2654     // becoming an absorbing state.  Put another way, we spin briefly to
       
  2655     // sample, just in case the system load, parallelism, contention, or lock
       
  2656     // modality changed.
       
  2657     //
       
  2658     // Consider the following alternative:
       
  2659     // Periodically set _SpinDuration = _SpinLimit and try a long/full
       
  2660     // spin attempt.  "Periodically" might mean after a tally of
       
  2661     // the # of failed spin attempts (or iterations) reaches some threshold.
       
  2662     // This takes us into the realm of 1-out-of-N spinning, where we
       
  2663     // hold the duration constant but vary the frequency.
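            // A sketch of that alternative -- the _SpinFailures counter is
            // hypothetical and does not exist in this file:
            //    if (++_SpinFailures >= N) {
            //       _SpinFailures = 0 ;
            //       _SpinDuration = Knob_SpinLimit ;   // periodic long/full spin attempt
            //    }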
       
  2664 
       
  2665     ctr = _SpinDuration  ;
       
  2666     if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
       
  2667     if (ctr <= 0) return 0 ;
       
  2668 
       
  2669     if (Knob_SuccRestrict && _succ != NULL) return 0 ;
       
  2670     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
       
  2671        TEVENT (Spin abort - notrunnable [TOP]);
       
  2672        return 0 ;
       
  2673     }
       
  2674 
       
  2675     int MaxSpin = Knob_MaxSpinners ;
       
  2676     if (MaxSpin >= 0) {
       
  2677        if (_Spinner > MaxSpin) {
       
  2678           TEVENT (Spin abort -- too many spinners) ;
       
  2679           return 0 ;
       
  2680        }
       
   2681        // Slightly racy, but benign ...
       
  2682        Adjust (&_Spinner, 1) ;
       
  2683     }
       
  2684 
       
  2685     // We're good to spin ... spin ingress.
       
  2686     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
       
  2687     // when preparing to LD...CAS _owner, etc and the CAS is likely
       
  2688     // to succeed.
       
  2689     int hits    = 0 ;
       
  2690     int msk     = 0 ;
       
  2691     int caspty  = Knob_CASPenalty ;
       
  2692     int oxpty   = Knob_OXPenalty ;
       
  2693     int sss     = Knob_SpinSetSucc ;
       
  2694     if (sss && _succ == NULL ) _succ = Self ;
       
  2695     Thread * prv = NULL ;
       
  2696 
       
  2697     // There are three ways to exit the following loop:
       
  2698     // 1.  A successful spin where this thread has acquired the lock.
       
  2699     // 2.  Spin failure with prejudice
       
  2700     // 3.  Spin failure without prejudice
       
  2701 
       
  2702     while (--ctr >= 0) {
       
  2703 
       
  2704       // Periodic polling -- Check for pending GC
       
  2705       // Threads may spin while they're unsafe.
       
  2706       // We don't want spinning threads to delay the JVM from reaching
       
  2707       // a stop-the-world safepoint or to steal cycles from GC.
       
  2708       // If we detect a pending safepoint we abort in order that
       
  2709       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
       
  2710       // this thread, if safe, doesn't steal cycles from GC.
       
  2711       // This is in keeping with the "no loitering in runtime" rule.
       
  2712       // We periodically check to see if there's a safepoint pending.
       
  2713       if ((ctr & 0xFF) == 0) {
       
  2714          if (SafepointSynchronize::do_call_back()) {
       
  2715             TEVENT (Spin: safepoint) ;
       
  2716             goto Abort ;           // abrupt spin egress
       
  2717          }
       
  2718          if (Knob_UsePause & 1) SpinPause () ;
       
  2719 
       
  2720          int (*scb)(intptr_t,int) = SpinCallbackFunction ;
       
  2721          if (hits > 50 && scb != NULL) {
       
  2722             int abend = (*scb)(SpinCallbackArgument, 0) ;
       
  2723          }
       
  2724       }
       
  2725 
       
  2726       if (Knob_UsePause & 2) SpinPause() ;
       
  2727 
       
  2728       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
       
  2729       // This is useful on classic SMP systems, but is of less utility on
       
  2730       // N1-style CMT platforms.
       
  2731       //
       
  2732       // Trade-off: lock acquisition latency vs coherency bandwidth.
       
  2733       // Lock hold times are typically short.  A histogram
       
  2734       // of successful spin attempts shows that we usually acquire
       
  2735       // the lock early in the spin.  That suggests we want to
       
  2736       // sample _owner frequently in the early phase of the spin,
       
  2737       // but then back-off and sample less frequently as the spin
       
   2738       // progresses.  The back-off makes us a good citizen on big

   2739       // SMP systems.  Oversampling _owner can consume excessive

   2740       // coherency bandwidth.  Relatedly, if we oversample _owner we

   2741       // can inadvertently interfere with the ST m->owner=null

   2742       // executed by the lock owner.
       
  2743       if (ctr & msk) continue ;
       
  2744       ++hits ;
       
  2745       if ((hits & 0xF) == 0) {
       
  2746         // The 0xF, above, corresponds to the exponent.
       
  2747         // Consider: (msk+1)|msk
       
  2748         msk = ((msk << 2)|3) & BackOffMask ;
       
  2749       }
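              // Example: msk grows 0 -> 0x3 -> 0xF -> 0x3F ... (capped by BackOffMask),
              // so the "ctr & msk" filter above samples _owner on every iteration at
              // first, then on every 4th, 16th, 64th, ... iteration as the spin ages.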
       
  2750 
       
  2751       // Probe _owner with TATAS
       
  2752       // If this thread observes the monitor transition or flicker
       
  2753       // from locked to unlocked to locked, then the odds that this
       
  2754       // thread will acquire the lock in this spin attempt go down
       
  2755       // considerably.  The same argument applies if the CAS fails
       
  2756       // or if we observe _owner change from one non-null value to
       
  2757       // another non-null value.   In such cases we might abort
       
  2758       // the spin without prejudice or apply a "penalty" to the
       
  2759       // spin count-down variable "ctr", reducing it by 100, say.
       
  2760 
       
  2761       Thread * ox = (Thread *) _owner ;
       
  2762       if (ox == NULL) {
       
  2763          ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
       
  2764          if (ox == NULL) {
       
  2765             // The CAS succeeded -- this thread acquired ownership
       
  2766             // Take care of some bookkeeping to exit spin state.
       
  2767             if (sss && _succ == Self) {
       
  2768                _succ = NULL ;
       
  2769             }
       
  2770             if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
       
  2771 
       
  2772             // Increase _SpinDuration :
       
  2773             // The spin was successful (profitable) so we tend toward
       
  2774             // longer spin attempts in the future.
       
  2775             // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
       
  2776             // If we acquired the lock early in the spin cycle it
       
  2777             // makes sense to increase _SpinDuration proportionally.
       
  2778             // Note that we don't clamp SpinDuration precisely at SpinLimit.
       
  2779             int x = _SpinDuration ;
       
  2780             if (x < Knob_SpinLimit) {
       
  2781                 if (x < Knob_Poverty) x = Knob_Poverty ;
       
  2782                 _SpinDuration = x + Knob_Bonus ;
       
  2783             }
       
  2784             return 1 ;
       
  2785          }
       
  2786 
       
  2787          // The CAS failed ... we can take any of the following actions:
       
  2788          // * penalize: ctr -= Knob_CASPenalty
       
  2789          // * exit spin with prejudice -- goto Abort;
       
  2790          // * exit spin without prejudice.
       
   2791          // * Since CAS is high-latency, simply retry immediately.
       
  2792          prv = ox ;
       
  2793          TEVENT (Spin: cas failed) ;
       
  2794          if (caspty == -2) break ;
       
  2795          if (caspty == -1) goto Abort ;
       
  2796          ctr -= caspty ;
       
  2797          continue ;
       
  2798       }
       
  2799 
       
  2800       // Did lock ownership change hands ?
       
  2801       if (ox != prv && prv != NULL ) {
       
  2802           TEVENT (spin: Owner changed)
       
  2803           if (oxpty == -2) break ;
       
  2804           if (oxpty == -1) goto Abort ;
       
  2805           ctr -= oxpty ;
       
  2806       }
       
  2807       prv = ox ;
       
  2808 
       
  2809       // Abort the spin if the owner is not executing.
       
  2810       // The owner must be executing in order to drop the lock.
       
  2811       // Spinning while the owner is OFFPROC is idiocy.
       
  2812       // Consider: ctr -= RunnablePenalty ;
       
  2813       if (Knob_OState && NotRunnable (Self, ox)) {
       
  2814          TEVENT (Spin abort - notrunnable);
       
  2815          goto Abort ;
       
  2816       }
       
  2817       if (sss && _succ == NULL ) _succ = Self ;
       
  2818    }
       
  2819 
       
  2820    // Spin failed with prejudice -- reduce _SpinDuration.
       
  2821    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
       
  2822    // AIMD is globally stable.
       
  2823    TEVENT (Spin failure) ;
       
  2824    {
       
  2825      int x = _SpinDuration ;
       
  2826      if (x > 0) {
       
  2827         // Consider an AIMD scheme like: x -= (x >> 3) + 100
       
   2828         // This is globally stable and tends to damp the response.
       
  2829         x -= Knob_Penalty ;
       
  2830         if (x < 0) x = 0 ;
       
  2831         _SpinDuration = x ;
       
  2832      }
       
  2833    }
       
  2834 
       
  2835  Abort:
       
  2836    if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
       
  2837    if (sss && _succ == Self) {
       
  2838       _succ = NULL ;
       
  2839       // Invariant: after setting succ=null a contending thread
       
  2840       // must recheck-retry _owner before parking.  This usually happens
       
  2841       // in the normal usage of TrySpin(), but it's safest
       
  2842       // to make TrySpin() as foolproof as possible.
       
  2843       OrderAccess::fence() ;
       
  2844       if (TryLock(Self) > 0) return 1 ;
       
  2845    }
       
  2846    return 0 ;
       
  2847 }
       
  2848 
       
  2849 #define TrySpin TrySpin_VaryDuration
       
  2850 
       
  2851 static void DeferredInitialize () {
       
  2852   if (InitDone > 0) return ;
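          // InitDone encodes a small state machine: 0 = not started,
          // -1 = initialization in progress, 1 = done.  Losers of the CAS
          // below simply spin until the winner publishes InitDone = 1
          // (made visible by the fence at the end of this function).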
       
  2853   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
       
  2854       while (InitDone != 1) ;
       
  2855       return ;
       
  2856   }
       
  2857 
       
  2858   // One-shot global initialization ...
       
  2859   // The initialization is idempotent, so we don't need locks.
       
  2860   // In the future consider doing this via os::init_2().
       
  2861   // SyncKnobs consist of <Key>=<Value> pairs in the style
       
  2862   // of environment variables.  Start by converting ':' to NUL.
       
  2863 
       
  2864   if (SyncKnobs == NULL) SyncKnobs = "" ;
       
  2865 
       
  2866   size_t sz = strlen (SyncKnobs) ;
       
  2867   char * knobs = (char *) malloc (sz + 2) ;
       
  2868   if (knobs == NULL) {
       
  2869      vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
       
  2870      guarantee (0, "invariant") ;
       
  2871   }
       
  2872   strcpy (knobs, SyncKnobs) ;
       
  2873   knobs[sz+1] = 0 ;
       
  2874   for (char * p = knobs ; *p ; p++) {
       
  2875      if (*p == ':') *p = 0 ;
       
  2876   }
       
  2877 
       
  2878   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
       
  2879   SETKNOB(ReportSettings) ;
       
  2880   SETKNOB(Verbose) ;
       
  2881   SETKNOB(FixedSpin) ;
       
  2882   SETKNOB(SpinLimit) ;
       
  2883   SETKNOB(SpinBase) ;
       
  2884   SETKNOB(SpinBackOff);
       
  2885   SETKNOB(CASPenalty) ;
       
  2886   SETKNOB(OXPenalty) ;
       
  2887   SETKNOB(LogSpins) ;
       
  2888   SETKNOB(SpinSetSucc) ;
       
  2889   SETKNOB(SuccEnabled) ;
       
  2890   SETKNOB(SuccRestrict) ;
       
  2891   SETKNOB(Penalty) ;
       
  2892   SETKNOB(Bonus) ;
       
  2893   SETKNOB(BonusB) ;
       
  2894   SETKNOB(Poverty) ;
       
  2895   SETKNOB(SpinAfterFutile) ;
       
  2896   SETKNOB(UsePause) ;
       
  2897   SETKNOB(SpinEarly) ;
       
  2898   SETKNOB(OState) ;
       
  2899   SETKNOB(MaxSpinners) ;
       
  2900   SETKNOB(PreSpin) ;
       
  2901   SETKNOB(ExitPolicy) ;
       
  2902   SETKNOB(QMode);
       
  2903   SETKNOB(ResetEvent) ;
       
  2904   SETKNOB(MoveNotifyee) ;
       
  2905   SETKNOB(FastHSSEC) ;
       
  2906   #undef SETKNOB
       
  2907 
       
  2908   if (os::is_MP()) {
       
  2909      BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
       
  2910      if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
       
  2911      // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
       
  2912   } else {
       
  2913      Knob_SpinLimit = 0 ;
       
  2914      Knob_SpinBase  = 0 ;
       
  2915      Knob_PreSpin   = 0 ;
       
  2916      Knob_FixedSpin = -1 ;
       
  2917   }
       
  2918 
       
  2919   if (Knob_LogSpins == 0) {
       
  2920      ObjectSynchronizer::_sync_FailedSpins = NULL ;
       
  2921   }
       
  2922 
       
  2923   free (knobs) ;
       
  2924   OrderAccess::fence() ;
       
  2925   InitDone = 1 ;
       
  2926 }
       
  2927 
       
  2928 // Theory of operations -- Monitors lists, thread residency, etc:
       
  2929 //
       
  2930 // * A thread acquires ownership of a monitor by successfully
       
  2931 //   CAS()ing the _owner field from null to non-null.
       
  2932 //
       
  2933 // * Invariant: A thread appears on at most one monitor list --
       
  2934 //   cxq, EntryList or WaitSet -- at any one time.
       
  2935 //
       
  2936 // * Contending threads "push" themselves onto the cxq with CAS
       
  2937 //   and then spin/park.
       
  2938 //
       
  2939 // * After a contending thread eventually acquires the lock it must
       
  2940 //   dequeue itself from either the EntryList or the cxq.
       
  2941 //
       
  2942 // * The exiting thread identifies and unparks an "heir presumptive"
       
  2943 //   tentative successor thread on the EntryList.  Critically, the
       
  2944 //   exiting thread doesn't unlink the successor thread from the EntryList.
       
  2945 //   After having been unparked, the wakee will recontend for ownership of
       
  2946 //   the monitor.   The successor (wakee) will either acquire the lock or
       
  2947 //   re-park itself.
       
  2948 //
       
  2949 //   Succession is provided for by a policy of competitive handoff.
       
  2950 //   The exiting thread does _not_ grant or pass ownership to the
       
   2951 //   successor thread.  (This is also referred to as "handoff" succession.)
       
  2952 //   Instead the exiting thread releases ownership and possibly wakes
       
  2953 //   a successor, so the successor can (re)compete for ownership of the lock.
       
  2954 //   If the EntryList is empty but the cxq is populated the exiting
       
  2955 //   thread will drain the cxq into the EntryList.  It does so by
       
   2956 //   detaching the cxq (installing null with CAS) and folding
       
  2957 //   the threads from the cxq into the EntryList.  The EntryList is
       
  2958 //   doubly linked, while the cxq is singly linked because of the
       
  2959 //   CAS-based "push" used to enqueue recently arrived threads (RATs).
       
  2960 //
       
  2961 // * Concurrency invariants:
       
  2962 //
       
  2963 //   -- only the monitor owner may access or mutate the EntryList.
       
  2964 //      The mutex property of the monitor itself protects the EntryList
       
  2965 //      from concurrent interference.
       
  2966 //   -- Only the monitor owner may detach the cxq.
       
  2967 //
       
  2968 // * The monitor entry list operations avoid locks, but strictly speaking
       
  2969 //   they're not lock-free.  Enter is lock-free, exit is not.
       
  2970 //   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
       
  2971 //
       
  2972 // * The cxq can have multiple concurrent "pushers" but only one concurrent
       
   2973 //   detaching thread.  This mechanism is immune to ABA corruption.
       
  2974 //   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
       
  2975 //
       
  2976 // * Taken together, the cxq and the EntryList constitute or form a
       
  2977 //   single logical queue of threads stalled trying to acquire the lock.
       
  2978 //   We use two distinct lists to improve the odds of a constant-time
       
  2979 //   dequeue operation after acquisition (in the ::enter() epilog) and
       
  2980 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
       
  2981 //   A key desideratum is to minimize queue & monitor metadata manipulation
       
  2982 //   that occurs while holding the monitor lock -- that is, we want to
       
   2983 //   minimize monitor lock hold times.  Note that even a small amount of
       
  2984 //   fixed spinning will greatly reduce the # of enqueue-dequeue operations
       
  2985 //   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
       
  2986 //   locks and monitor metadata.
       
  2987 //
       
   2988 //   Cxq points to the set of Recently Arrived Threads attempting entry.

   2989 //   Because we push threads onto _cxq with CAS, the RATs must take the form of

   2990 //   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when

   2991 //   the unlocking thread notices that EntryList is null but _cxq is non-null.
       
  2992 //
       
  2993 //   The EntryList is ordered by the prevailing queue discipline and
       
  2994 //   can be organized in any convenient fashion, such as a doubly-linked list or
       
  2995 //   a circular doubly-linked list.  Critically, we want insert and delete operations
       
  2996 //   to operate in constant-time.  If we need a priority queue then something akin
       
  2997 //   to Solaris' sleepq would work nicely.  Viz.,
       
  2998 //   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
       
  2999 //   Queue discipline is enforced at ::exit() time, when the unlocking thread
       
  3000 //   drains the cxq into the EntryList, and orders or reorders the threads on the
       
  3001 //   EntryList accordingly.
       
  3002 //
       
  3003 //   Barring "lock barging", this mechanism provides fair cyclic ordering,
       
  3004 //   somewhat similar to an elevator-scan.
       
  3005 //
       
  3006 // * The monitor synchronization subsystem avoids the use of native
       
  3007 //   synchronization primitives except for the narrow platform-specific
       
  3008 //   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
       
  3009 //   the semantics of park-unpark.  Put another way, this monitor implementation
       
  3010 //   depends only on atomic operations and park-unpark.  The monitor subsystem
       
  3011 //   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
       
  3012 //   underlying OS manages the READY<->RUN transitions.
       
  3013 //
       
  3014 // * Waiting threads reside on the WaitSet list -- wait() puts
       
  3015 //   the caller onto the WaitSet.
       
  3016 //
       
  3017 // * notify() or notifyAll() simply transfers threads from the WaitSet to
       
  3018 //   either the EntryList or cxq.  Subsequent exit() operations will
       
   3019 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient --
       
  3020 //   it's likely the notifyee would simply impale itself on the lock held
       
  3021 //   by the notifier.
       
  3022 //
       
  3023 // * An interesting alternative is to encode cxq as (List,LockByte) where
       
  3024 //   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
       
  3025 //   variable, like _recursions, in the scheme.  The threads or Events that form
       
   3026 //   the list would have to be aligned on 256-byte boundaries.  A thread would

   3027 //   try to acquire the lock or enqueue itself with CAS, but exiting threads

   3028 //   could use a 1-0 protocol and simply STB to set the LockByte to 0.

   3029 //   Note that this is *not* word-tearing, but it does presume that full-word

   3030 //   CAS operations remain coherent when intermixed with STB operations.  That's

   3031 //   true on most common processors.  (A sketch of this encoding follows this block.)
       
  3032 //
       
  3033 // * See also http://blogs.sun.com/dave
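        //
        // A sketch of the (List,LockByte) encoding mentioned above -- purely
        // illustrative, assuming little-endian layout and 256-byte-aligned nodes
        // so the low-order byte of the word is free to serve as the lock:
        //
        //     typedef volatile intptr_t LockWord ;
        //     // enter: CAS the full word to grab the LockByte or push a node.
        //     // exit:  1-0 protocol -- a plain byte store releases the lock.
        //     static void ReleaseLockByte (LockWord * w) {
        //        *((volatile unsigned char *) w) = 0 ;    // STB LockByte=0
        //     }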
       
  3034 
       
  3035 
       
  3036 void ATTR ObjectMonitor::EnterI (TRAPS) {
       
  3037     Thread * Self = THREAD ;
       
  3038     assert (Self->is_Java_thread(), "invariant") ;
       
  3039     assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
       
  3040 
       
  3041     // Try the lock - TATAS
       
  3042     if (TryLock (Self) > 0) {
       
  3043         assert (_succ != Self              , "invariant") ;
       
  3044         assert (_owner == Self             , "invariant") ;
       
  3045         assert (_Responsible != Self       , "invariant") ;
       
  3046         return ;
       
  3047     }
       
  3048 
       
  3049     DeferredInitialize () ;
       
  3050 
       
  3051     // We try one round of spinning *before* enqueueing Self.
       
  3052     //
       
  3053     // If the _owner is ready but OFFPROC we could use a YieldTo()
       
  3054     // operation to donate the remainder of this thread's quantum
       
  3055     // to the owner.  This has subtle but beneficial affinity
       
  3056     // effects.
       
  3057 
       
  3058     if (TrySpin (Self) > 0) {
       
  3059         assert (_owner == Self        , "invariant") ;
       
  3060         assert (_succ != Self         , "invariant") ;
       
  3061         assert (_Responsible != Self  , "invariant") ;
       
  3062         return ;
       
  3063     }
       
  3064 
       
  3065     // The Spin failed -- Enqueue and park the thread ...
       
  3066     assert (_succ  != Self            , "invariant") ;
       
  3067     assert (_owner != Self            , "invariant") ;
       
  3068     assert (_Responsible != Self      , "invariant") ;
       
  3069 
       
  3070     // Enqueue "Self" on ObjectMonitor's _cxq.
       
  3071     //
       
  3072     // Node acts as a proxy for Self.
       
   3073     // As an aside, if we were ever to rewrite the synchronization code mostly
       
  3074     // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
       
  3075     // Java objects.  This would avoid awkward lifecycle and liveness issues,
       
  3076     // as well as eliminate a subset of ABA issues.
       
  3077     // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
       
  3078     //
       
  3079 
       
  3080     ObjectWaiter node(Self) ;
       
  3081     Self->_ParkEvent->reset() ;
       
  3082     node._prev   = (ObjectWaiter *) 0xBAD ;
       
  3083     node.TState  = ObjectWaiter::TS_CXQ ;
       
  3084 
       
  3085     // Push "Self" onto the front of the _cxq.
       
  3086     // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
       
  3087     // Note that spinning tends to reduce the rate at which threads
       
  3088     // enqueue and dequeue on EntryList|cxq.
       
  3089     ObjectWaiter * nxt ;
       
  3090     for (;;) {
       
  3091         node._next = nxt = _cxq ;
       
  3092         if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
       
  3093 
       
  3094         // Interference - the CAS failed because _cxq changed.  Just retry.
       
  3095         // As an optional optimization we retry the lock.
       
  3096         if (TryLock (Self) > 0) {
       
  3097             assert (_succ != Self         , "invariant") ;
       
  3098             assert (_owner == Self        , "invariant") ;
       
  3099             assert (_Responsible != Self  , "invariant") ;
       
  3100             return ;
       
  3101         }
       
  3102     }
       
  3103 
       
  3104     // Check for cxq|EntryList edge transition to non-null.  This indicates
       
  3105     // the onset of contention.  While contention persists exiting threads
       
  3106     // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
       
  3107     // operations revert to the faster 1-0 mode.  This enter operation may interleave
       
  3108     // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
       
   3109     // arrange for one of the contending threads to use a timed park() operation

   3110     // to detect and recover from the race.  (Stranding is a form of progress failure
       
  3111     // where the monitor is unlocked but all the contending threads remain parked).
       
  3112     // That is, at least one of the contended threads will periodically poll _owner.
       
  3113     // One of the contending threads will become the designated "Responsible" thread.
       
  3114     // The Responsible thread uses a timed park instead of a normal indefinite park
       
  3115     // operation -- it periodically wakes and checks for and recovers from potential
       
  3116     // strandings admitted by 1-0 exit operations.   We need at most one Responsible
       
  3117     // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
       
  3118     // be responsible for a monitor.
       
  3119     //
       
  3120     // Currently, one of the contended threads takes on the added role of "Responsible".
       
  3121     // A viable alternative would be to use a dedicated "stranding checker" thread
       
  3122     // that periodically iterated over all the threads (or active monitors) and unparked
       
  3123     // successors where there was risk of stranding.  This would help eliminate the
       
  3124     // timer scalability issues we see on some platforms as we'd only have one thread
       
  3125     // -- the checker -- parked on a timer.
       
  3126 
       
  3127     if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
       
  3128         // Try to assume the role of responsible thread for the monitor.
       
  3129         // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
       
  3130         Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
       
  3131     }
       
  3132 
       
   3133     // The lock may have been released while this thread was occupied queueing
       
  3134     // itself onto _cxq.  To close the race and avoid "stranding" and
       
  3135     // progress-liveness failure we must resample-retry _owner before parking.
       
  3136     // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
       
  3137     // In this case the ST-MEMBAR is accomplished with CAS().
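            // Schematically, restating the duality:
            //    enter: ST _cxq = node   ; MEMBAR ; LD _owner          (ST+MEMBAR via CAS)
            //    exit:  ST _owner = NULL ; MEMBAR ; LD _cxq|EntryList  (1-1 mode)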
       
  3138     //
       
  3139     // TODO: Defer all thread state transitions until park-time.
       
  3140     // Since state transitions are heavy and inefficient we'd like
       
  3141     // to defer the state transitions until absolutely necessary,
       
  3142     // and in doing so avoid some transitions ...
       
  3143 
       
  3144     TEVENT (Inflated enter - Contention) ;
       
  3145     int nWakeups = 0 ;
       
  3146     int RecheckInterval = 1 ;
       
  3147 
       
  3148     for (;;) {
       
  3149 
       
  3150         if (TryLock (Self) > 0) break ;
       
  3151         assert (_owner != Self, "invariant") ;
       
  3152 
       
  3153         if ((SyncFlags & 2) && _Responsible == NULL) {
       
  3154            Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
       
  3155         }
       
  3156 
       
  3157         // park self
       
  3158         if (_Responsible == Self || (SyncFlags & 1)) {
       
  3159             TEVENT (Inflated enter - park TIMED) ;
       
  3160             Self->_ParkEvent->park ((jlong) RecheckInterval) ;
       
  3161             // Increase the RecheckInterval, but clamp the value.
       
  3162             RecheckInterval *= 8 ;
       
  3163             if (RecheckInterval > 1000) RecheckInterval = 1000 ;
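                    // With these constants the timed parks back off geometrically:
                    // 1, 8, 64, 512 ms, then stay pinned at the 1000 ms cap.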
       
  3164         } else {
       
  3165             TEVENT (Inflated enter - park UNTIMED) ;
       
  3166             Self->_ParkEvent->park() ;
       
  3167         }
       
  3168 
       
  3169         if (TryLock(Self) > 0) break ;
       
  3170 
       
  3171         // The lock is still contested.
       
  3172         // Keep a tally of the # of futile wakeups.
       
  3173         // Note that the counter is not protected by a lock or updated by atomics.
       
  3174         // That is by design - we trade "lossy" counters which are exposed to
       
  3175         // races during updates for a lower probe effect.
       
  3176         TEVENT (Inflated enter - Futile wakeup) ;
       
  3177         if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
       
  3178            ObjectSynchronizer::_sync_FutileWakeups->inc() ;
       
  3179         }
       
  3180         ++ nWakeups ;
       
  3181 
       
  3182         // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
       
  3183         // We can defer clearing _succ until after the spin completes
       
  3184         // TrySpin() must tolerate being called with _succ == Self.
       
  3185         // Try yet another round of adaptive spinning.
       
  3186         if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
       
  3187 
       
  3188         // We can find that we were unpark()ed and redesignated _succ while
       
  3189         // we were spinning.  That's harmless.  If we iterate and call park(),
       
  3190         // park() will consume the event and return immediately and we'll
       
  3191         // just spin again.  This pattern can repeat, leaving _succ to simply
       
  3192         // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
       
  3193         // Alternately, we can sample fired() here, and if set, forgo spinning
       
  3194         // in the next iteration.
       
  3195 
       
  3196         if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
       
  3197            Self->_ParkEvent->reset() ;
       
  3198            OrderAccess::fence() ;
       
  3199         }
       
  3200         if (_succ == Self) _succ = NULL ;
       
  3201 
       
  3202         // Invariant: after clearing _succ a thread *must* retry _owner before parking.
       
  3203         OrderAccess::fence() ;
       
  3204     }
       
  3205 
       
  3206     // Egress :
       
  3207     // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
       
   3208     // Normally we'll find Self on the EntryList.
       
  3209     // From the perspective of the lock owner (this thread), the
       
  3210     // EntryList is stable and cxq is prepend-only.
       
  3211     // The head of cxq is volatile but the interior is stable.
       
  3212     // In addition, Self.TState is stable.
       
  3213 
       
  3214     assert (_owner == Self      , "invariant") ;
       
  3215     assert (object() != NULL    , "invariant") ;
       
  3216     // I'd like to write:
       
  3217     //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
       
  3218     // but as we're at a safepoint that's not safe.
       
  3219 
       
  3220     UnlinkAfterAcquire (Self, &node) ;
       
  3221     if (_succ == Self) _succ = NULL ;
       
  3222 
       
  3223     assert (_succ != Self, "invariant") ;
       
  3224     if (_Responsible == Self) {
       
  3225         _Responsible = NULL ;
       
  3226         // Dekker pivot-point.
       
  3227         // Consider OrderAccess::storeload() here
       
  3228 
       
  3229         // We may leave threads on cxq|EntryList without a designated
       
  3230         // "Responsible" thread.  This is benign.  When this thread subsequently
       
  3231         // exits the monitor it can "see" such preexisting "old" threads --
       
  3232         // threads that arrived on the cxq|EntryList before the fence, above --
       
  3233         // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
       
  3234         // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
       
  3235         // non-null and elect a new "Responsible" timer thread.
       
  3236         //
       
  3237         // This thread executes:
       
  3238         //    ST Responsible=null; MEMBAR    (in enter epilog - here)
       
  3239         //    LD cxq|EntryList               (in subsequent exit)
       
  3240         //
       
  3241         // Entering threads in the slow/contended path execute:
       
  3242         //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
       
  3243         //    The (ST cxq; MEMBAR) is accomplished with CAS().
       
  3244         //
       
  3245         // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
       
  3246         // exit operation from floating above the ST Responsible=null.
       
  3247         //
       
  3248         // In *practice* however, EnterI() is always followed by some atomic
       
  3249         // operation such as the decrement of _count in ::enter().  Those atomics
       
  3250         // obviate the need for the explicit MEMBAR, above.
       
  3251     }
       
  3252 
       
  3253     // We've acquired ownership with CAS().
       
  3254     // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
       
  3255     // But since the CAS() this thread may have also stored into _succ,
       
  3256     // EntryList, cxq or Responsible.  These meta-data updates must be
       
   3257     // visible *before* this thread subsequently drops the lock.
       
  3258     // Consider what could occur if we didn't enforce this constraint --
       
  3259     // STs to monitor meta-data and user-data could reorder with (become
       
  3260     // visible after) the ST in exit that drops ownership of the lock.
       
  3261     // Some other thread could then acquire the lock, but observe inconsistent
       
  3262     // or old monitor meta-data and heap data.  That violates the JMM.
       
  3263     // To that end, the 1-0 exit() operation must have at least STST|LDST
       
  3264     // "release" barrier semantics.  Specifically, there must be at least a
       
  3265     // STST|LDST barrier in exit() before the ST of null into _owner that drops
       
  3266     // the lock.   The barrier ensures that changes to monitor meta-data and data
       
  3267     // protected by the lock will be visible before we release the lock, and
       
  3268     // therefore before some other thread (CPU) has a chance to acquire the lock.
       
  3269     // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
       
  3270     //
       
  3271     // Critically, any prior STs to _succ or EntryList must be visible before
       
  3272     // the ST of null into _owner in the *subsequent* (following) corresponding
       
  3273     // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
       
  3274     // execute a serializing instruction.
       
  3275 
       
  3276     if (SyncFlags & 8) {
       
  3277        OrderAccess::fence() ;
       
  3278     }
       
  3279     return ;
       
  3280 }
       
  3281 
       
  3282 // ExitSuspendEquivalent:
       
   3283 // A faster alternative to handle_special_suspend_equivalent_condition()
       
  3284 //
       
  3285 // handle_special_suspend_equivalent_condition() unconditionally
       
  3286 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
       
  3287 // operations have high latency.  Note that in ::enter() we call HSSEC
       
  3288 // while holding the monitor, so we effectively lengthen the critical sections.
       
  3289 //
       
  3290 // There are a number of possible solutions:
       
  3291 //
       
  3292 // A.  To ameliorate the problem we might also defer state transitions
       
  3293 //     to as late as possible -- just prior to parking.
       
  3294 //     Given that, we'd call HSSEC after having returned from park(),
       
  3295 //     but before attempting to acquire the monitor.  This is only a
       
  3296 //     partial solution.  It avoids calling HSSEC while holding the
       
  3297 //     monitor (good), but it still increases successor reacquisition latency --
       
  3298 //     the interval between unparking a successor and the time the successor
       
  3299 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
       
  3300 //     If we use this technique we can also avoid EnterI()-exit() loop
       
  3301 //     in ::enter() where we iteratively drop the lock and then attempt
       
  3302 //     to reacquire it after suspending.
       
  3303 //
       
  3304 // B.  In the future we might fold all the suspend bits into a
       
  3305 //     composite per-thread suspend flag and then update it with CAS().
       
  3306 //     Alternately, a Dekker-like mechanism with multiple variables
       
  3307 //     would suffice:
       
  3308 //       ST Self->_suspend_equivalent = false
       
  3309 //       MEMBAR
       
   3310 //       LD Self->_suspend_flags
       
  3311 //
       
  3312 
       
  3313 
       
  3314 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
       
  3315    int Mode = Knob_FastHSSEC ;
       
  3316    if (Mode && !jSelf->is_external_suspend()) {
       
  3317       assert (jSelf->is_suspend_equivalent(), "invariant") ;
       
  3318       jSelf->clear_suspend_equivalent() ;
       
  3319       if (2 == Mode) OrderAccess::storeload() ;
       
  3320       if (!jSelf->is_external_suspend()) return false ;
       
  3321       // We raced a suspension -- fall thru into the slow path
       
  3322       TEVENT (ExitSuspendEquivalent - raced) ;
       
  3323       jSelf->set_suspend_equivalent() ;
       
  3324    }
       
  3325    return jSelf->handle_special_suspend_equivalent_condition() ;
       
  3326 }
       
  3327 
       
  3328 
       
  3329 // ReenterI() is a specialized inline form of the latter half of the
       
  3330 // contended slow-path from EnterI().  We use ReenterI() only for
       
  3331 // monitor reentry in wait().
       
  3332 //
       
  3333 // In the future we should reconcile EnterI() and ReenterI(), adding
       
  3334 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
       
  3335 // loop accordingly.
       
  3336 
       
  3337 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
       
  3338     assert (Self != NULL                , "invariant") ;
       
  3339     assert (SelfNode != NULL            , "invariant") ;
       
  3340     assert (SelfNode->_thread == Self   , "invariant") ;
       
  3341     assert (_waiters > 0                , "invariant") ;
       
  3342     assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
       
  3343     assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
       
  3344     JavaThread * jt = (JavaThread *) Self ;
       
  3345 
       
  3346     int nWakeups = 0 ;
       
  3347     for (;;) {
       
  3348         ObjectWaiter::TStates v = SelfNode->TState ;
       
  3349         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
       
  3350         assert    (_owner != Self, "invariant") ;
       
  3351 
       
  3352         if (TryLock (Self) > 0) break ;
       
  3353         if (TrySpin (Self) > 0) break ;
       
  3354 
       
  3355         TEVENT (Wait Reentry - parking) ;
       
  3356 
       
  3357         // State transition wrappers around park() ...
       
  3358         // ReenterI() wisely defers state transitions until
       
  3359         // it's clear we must park the thread.
       
  3360         {
       
  3361            OSThreadContendState osts(Self->osthread());
       
  3362            ThreadBlockInVM tbivm(jt);
       
  3363 
       
  3364            // cleared by handle_special_suspend_equivalent_condition()
       
  3365            // or java_suspend_self()
       
  3366            jt->set_suspend_equivalent();
       
  3367            if (SyncFlags & 1) {
       
  3368               Self->_ParkEvent->park ((jlong)1000) ;
       
  3369            } else {
       
  3370               Self->_ParkEvent->park () ;
       
  3371            }
       
  3372 
       
  3373            // were we externally suspended while we were waiting?
       
  3374            for (;;) {
       
  3375               if (!ExitSuspendEquivalent (jt)) break ;
       
  3376               if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
       
  3377               jt->java_suspend_self();
       
  3378               jt->set_suspend_equivalent();
       
  3379            }
       
  3380         }
       
  3381 
       
  3382         // Try again, but just so we distinguish between futile wakeups and
       
  3383         // successful wakeups.  The following test isn't algorithmically
       
  3384         // necessary, but it helps us maintain sensible statistics.
       
  3385         if (TryLock(Self) > 0) break ;
       
  3386 
       
  3387         // The lock is still contested.
       
  3388         // Keep a tally of the # of futile wakeups.
       
  3389         // Note that the counter is not protected by a lock or updated by atomics.
       
  3390         // That is by design - we trade "lossy" counters which are exposed to
       
  3391         // races during updates for a lower probe effect.
       
  3392         TEVENT (Wait Reentry - futile wakeup) ;
       
  3393         ++ nWakeups ;
       
  3394 
       
  3395         // Assuming this is not a spurious wakeup we'll normally
       
  3396         // find that _succ == Self.
       
  3397         if (_succ == Self) _succ = NULL ;
       
  3398 
       
  3399         // Invariant: after clearing _succ a contending thread
       
  3400         // *must* retry  _owner before parking.
       
  3401         OrderAccess::fence() ;
       
  3402 
       
  3403         if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
       
  3404           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
       
  3405         }
       
  3406     }
       
  3407 
       
   3408     // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
       
  3409     // Normally we'll find Self on the EntryList.
       
  3410     // Unlinking from the EntryList is constant-time and atomic-free.
       
  3411     // From the perspective of the lock owner (this thread), the
       
  3412     // EntryList is stable and cxq is prepend-only.
       
  3413     // The head of cxq is volatile but the interior is stable.
       
  3414     // In addition, Self.TState is stable.
       
  3415 
       
  3416     assert (_owner == Self, "invariant") ;
       
  3417     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
       
  3418     UnlinkAfterAcquire (Self, SelfNode) ;
       
  3419     if (_succ == Self) _succ = NULL ;
       
  3420     assert (_succ != Self, "invariant") ;
       
  3421     SelfNode->TState = ObjectWaiter::TS_RUN ;
       
  3422     OrderAccess::fence() ;      // see comments at the end of EnterI()
       
  3423 }
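
// In outline, the reentry loop above is the canonical spin-then-park shape.
// A condensed, illustrative sketch (not the compiled code):
//
//    for (;;) {
//       if (TryLock (Self) > 0 || TrySpin (Self) > 0) break ;
//       park () ;                             // may wake futilely
//       if (TryLock (Self) > 0) break ;       // distinguish futile wakeups
//       if (_succ == Self) _succ = NULL ;
//       OrderAccess::fence() ;                // re-check _owner before re-parking
//    }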
       
bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;
       _recursions = 1 ;
       OwnerIsThread = 1 ;
       return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}
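
// For illustration only: a hypothetical caller would treat try_enter() as a
// non-blocking trylock, falling back to the blocking enter() path.
//
//    if (!mon->try_enter (Self)) {
//       mon->enter (Self) ;       // may enqueue and park
//    }
//    ... critical section ...
//    mon->exit (Self) ;
//
// Note that try_enter() also handles recursive re-entry by the owner and
// commutes a stack-locked (BasicLock) owner to a full-fledged Thread*,
// mirroring the fast paths at the top of enter() below.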
       
void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert (_recursions == 0   , "invariant") ;
     assert (_owner      == Self, "invariant") ;
     // CONSIDER: set or assert OwnerIsThread == 1
     return ;
  }

  if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions ++ ;
     return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert (_owner == Self      , "invariant") ;
     assert (_recursions == 0    , "invariant") ;
     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     Self->_Stalled = 0 ;
     return ;
  }

  assert (_owner != Self          , "invariant") ;
  assert (_succ  != Self          , "invariant") ;
  assert (Self->is_Java_thread()  , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL  , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0     , "invariant") ;
  assert (_owner == Self       , "invariant") ;
  assert (_succ  != Self       , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI, DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning, that thread could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);
  }
  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
  }
}
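
// Shorn of assertions and bookkeeping, enter() reduces to the following
// layered fast-path sketch (illustrative only, not the compiled code):
//
//    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) return ;  // uncontended
//    if (_owner == Self) { _recursions++ ; return ; }                 // recursive
//    if (Self->is_lock_owned ((address)_owner)) {                     // commute
//       _owner = Self ; _recursions = 1 ; return ;                    // BasicLock -> Thread*
//    }
//    if (TrySpin (Self) > 0) return ;                                 // optional early spin
//    EnterI (Self) ;                                                  // enqueue and park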
       
void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
   assert (_owner == Self, "invariant") ;

   // Exit protocol:
   // 1. ST _succ = wakee
   // 2. membar #loadstore|#storestore;
   // 3. ST _owner = NULL
   // 4. unpark(wakee)

   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
   ParkEvent * Trigger = Wakee->_event ;

   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
   // out-of-scope (non-extant).
   Wakee  = NULL ;

   // Drop the lock
   OrderAccess::release_store_ptr (&_owner, NULL) ;
   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()

   // TODO-FIXME:
   // If there's a safepoint pending the best policy would be to
   // get _this thread to a safepoint and only wake the successor
   // after the safepoint completed.  monitorexit uses a "leaf"
   // state transition, however, so this thread can't become
   // safe at this point in time.  (Its stack isn't walkable).
   // The next best thing is to defer waking the successor by
   // adding it to a list of threads to be unparked at the
   // end of the forthcoming STW pause.
   if (SafepointSynchronize::do_call_back()) {
      TEVENT (unpark before SAFEPOINT) ;
   }

   // Possible optimizations ...
   //
   // * Consider: set Wakee->UnparkTime = timeNow()
   //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
   //   By measuring recent ONPROC latency we can approximate the
   //   system load.  In turn, we can feed that information back
   //   into the spinning & succession policies.
   //   (ONPROC latency correlates strongly with load).
   //
   // * Pull affinity:
   //   If the wakee is cold then transiently setting its affinity
   //   to the current CPU is a good idea.
   //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
   Trigger->unpark() ;

   // Maintain stats and report events to JVMTI
   if (ObjectSynchronizer::_sync_Parks != NULL) {
      ObjectSynchronizer::_sync_Parks->inc() ;
   }
}
       

// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.   Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load on the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.
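//
// To make the 1-0 vs 1-1 distinction concrete, the two exit idioms are,
// in sketch form (illustrative only):
//
//   1-1 exit:  ST _owner = NULL ; MEMBAR #storeload ; LD EntryList|cxq
//              The MEMBAR closes the race against a slow-path ::enter().
//   1-0 exit:  ST _owner = NULL
//              No MEMBAR -- an entering thread may enqueue itself and park
//              unnoticed (stranding), so a timed-park "_Responsible" thread
//              periodically wakes and polls the lock to recover.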
       
void ATTR ObjectMonitor::exit(TRAPS) {
   Thread * Self = THREAD ;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
       // Non-null to Non-null is safe as long as all readers can
       // tolerate either flavor.
       assert (_recursions == 0, "invariant") ;
       _owner = THREAD ;
       _recursions = 0 ;
       OwnerIsThread = 1 ;
     } else {
       // NOTE: we need to handle unbalanced monitor enter/exit
       // in native code by throwing an exception.
       // TODO: Throw an IllegalMonitorStateException ?
       TEVENT (Exit - Throw IMSX) ;
       assert(false, "Non-balanced monitor enter/exit!");
       if (false) {
          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
       }
       return;
     }
   }

   if (_recursions != 0) {
     _recursions--;        // this is simple recursive enter
     TEVENT (Inflated exit - recursive) ;
     return ;
   }

   // Invariant: after setting _Responsible = NULL a thread must execute
   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }

   for (;;) {
      assert (THREAD == _owner, "invariant") ;

      // Fast-path monitor exit:
      //
      // Observe the Dekker/Lamport duality:
      // A thread in ::exit() executes:
      //   ST Owner=null; MEMBAR; LD EntryList|cxq.
      // A thread in the contended ::enter() path executes the complementary:
      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
      //
      // Note that there's a benign race in the exit path.  We can drop the
      // lock, another thread can reacquire the lock immediately, and we can
      // then wake a thread unnecessarily (yet another flavor of futile wakeup).
      // This is benign, and we've structured the code so the windows are short
      // and the frequency of such futile wakeups is low.
      //
      // We could eliminate the race by encoding both the "LOCKED" state and
      // the queue head in a single word.  Exit would then use a CAS to
      // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
      // however.
      //
      // Possible fast-path ::exit() optimization:
      // The current fast-path exit implementation fetches both cxq and EntryList.
      // See also i486.ad fast_unlock().  Testing has shown that two LDs
      // aren't measurably slower than a single LD on any platforms.
      // Still, we could reduce the 2 LDs to one or zero by one of the following:
      //
      // - Use _count instead of cxq|EntryList
      //   We intend to eliminate _count, however, when we switch
      //   to on-the-fly deflation in ::exit() as is used in
      //   Metalocks and RelaxedLocks.
      //
      // - Establish the invariant that cxq == null implies EntryList == null.
      //   Set cxq == EMPTY (1) to encode the state where cxq is empty
      //   but EntryList != null.  EMPTY is a distinguished value.
      //   The fast-path exit() would fetch cxq but not EntryList.
      //
      // - Encode succ as follows:
      //   succ = t :  Thread t is the successor -- t is ready or is spinning.
      //               Exiting thread does not need to wake a successor.
      //   succ = 0 :  No successor required -> (EntryList|cxq) == null
      //               Exiting thread does not need to wake a successor.
      //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
      //               logically succ == null.
      //               Exiting thread must wake a successor.
      //
      //   The 1-1 fast-exit path would appear as:
      //     _owner = null ; membar ;
      //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
      //     goto FastPathDone ;
      //
      //   and the 1-0 fast-exit path would appear as:
      //      if (_succ == 1) goto SlowPath
      //      Owner = null ;
      //      goto FastPathDone
      //
      // - Encode the LSB of _owner as 1 to indicate that exit()
      //   must use the slow-path and make a successor ready.
      //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
      //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
      //   The 1-0 fast exit path would read:
      //      if (_owner != Self) goto SlowPath
      //      _owner = null
      //      goto FastPathDone
       
      if (Knob_ExitPolicy == 0) {
         // release semantics: prior loads and stores from within the critical section
         // must not float (reorder) past the following store that drops the lock.
         // On SPARC that requires MEMBAR #loadstore|#storestore.
         // But of course in TSO #loadstore|#storestore is not required.
         // I'd like to write one of the following:
         // A.  OrderAccess::release() ; _owner = NULL
         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
         // store into a _dummy variable.  That store is not needed, but can result
         // in massive wasteful coherency traffic on classic SMP systems.
         // Instead, I use release_store(), which is implemented as just a simple
         // ST on x64, x86 and SPARC.
         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
         OrderAccess::storeload() ;                         // See if we need to wake a successor
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            TEVENT (Inflated exit - simple egress) ;
            return ;
         }
         TEVENT (Inflated exit - complex egress) ;

         // Normally the exiting thread is responsible for ensuring succession,
         // but if other successors are ready or other entering threads are spinning
         // then this thread can simply store NULL into _owner and exit without
         // waking a successor.  The existence of spinners or ready successors
         // guarantees proper succession (liveness).  Responsibility passes to the
         // ready or running successors.  The exiting thread delegates the duty.
         // More precisely, if a successor already exists this thread is absolved
         // of the responsibility of waking (unparking) one.
         //
         // The _succ variable is critical to reducing futile wakeup frequency.
         // _succ identifies the "heir presumptive" thread that has been made
         // ready (unparked) but that has not yet run.  We need only one such
         // successor thread to guarantee progress.
         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
         // section 3.3 "Futile Wakeup Throttling" for details.
         //
         // Note that spinners in Enter() also set _succ non-null.
         // In the current implementation spinners opportunistically set
         // _succ so that exiting threads might avoid waking a successor.
         // Another less appealing alternative would be for the exiting thread
         // to drop the lock and then spin briefly to see if a spinner managed
         // to acquire the lock.  If so, the exiting thread could exit
         // immediately without waking a successor, otherwise the exiting
         // thread would need to dequeue and wake a successor.
         // (Note that we'd need to make the post-drop spin short, but no
         // shorter than the worst-case round-trip cache-line migration time.
         // The dropped lock needs to become visible to the spinner, and then
         // the acquisition of the lock by the spinner must become visible to
         // the exiting thread).
         //

         // It appears that an heir-presumptive (successor) must be made ready.
         // Only the current lock owner can manipulate the EntryList or
         // drain _cxq, so we need to reacquire the lock.  If we fail
         // to reacquire the lock the responsibility for ensuring succession
         // falls to the new owner.
         //
         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return ;
         }
         TEVENT (Exit - Reacquired) ;
      } else {
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
            OrderAccess::storeload() ;
            // Ratify the previously observed values.
            if (_cxq == NULL || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }

            // inopportune interleaving -- the exiting thread (this thread)
            // in the fast-exit path raced an entering thread in the slow-enter
            // path.
            // We have two choices:
            // A.  Try to reacquire the lock.
            //     If the CAS() fails return immediately, otherwise
            //     we either restart/rerun the exit operation, or simply
            //     fall-through into the code below which wakes a successor.
            // B.  If the elements forming the EntryList|cxq are TSM
            //     we could simply unpark() the lead thread and return
            //     without having set _succ.
            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
               TEVENT (Inflated exit - reacquired succeeded) ;
               return ;
            }
            TEVENT (Inflated exit - reacquired failed) ;
         } else {
            TEVENT (Inflated exit - complex egress) ;
         }
      }
       
      guarantee (_owner == THREAD, "invariant") ;

      // Select an appropriate successor ("heir presumptive") from the EntryList
      // and make it ready.  Generally we just wake the head of EntryList.
      // There's no algorithmic constraint that we use the head - it's just
      // a policy decision.   Note that the thread at the head of the EntryList
      // remains at the head until it acquires the lock.  This means we'll
      // repeatedly wake the same thread until it manages to grab the lock.
      // This is generally a good policy - if we're seeing lots of futile wakeups
      // at least we're waking/rewaking a thread that's likely to be hot or warm
      // (have residual D$ and TLB affinity).
      //
      // "Wakeup locality" optimization:
      // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
      // In the future we'll try to bias the selection mechanism
      // to preferentially pick a thread that recently ran on
      // a processor element that shares cache with the CPU on which
      // the exiting thread is running.   We need access to Solaris'
      // schedctl.sc_cpu to make that work.
      //
      ObjectWaiter * w = NULL ;
      int QMode = Knob_QMode ;

      if (QMode == 2 && _cxq != NULL) {
          // QMode == 2 : cxq has precedence over EntryList.
          // Try to directly wake a successor from the cxq.
          // If successful, the successor will need to unlink itself from cxq.
          w = _cxq ;
          assert (w != NULL, "invariant") ;
          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }

      if (QMode == 3 && _cxq != NULL) {
          // Aggressively drain cxq into EntryList at the first opportunity.
          // This policy ensures that recently-run threads live at the head of EntryList.
          // Drain _cxq into EntryList - bulk transfer.
          // First, detach _cxq.
          // The following loop is tantamount to: w = swap (&cxq, NULL)
          w = _cxq ;
          for (;;) {
             assert (w != NULL, "Invariant") ;
             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
             if (u == w) break ;
             w = u ;
          }
          assert (w != NULL              , "invariant") ;

          ObjectWaiter * q = NULL ;
          ObjectWaiter * p ;
          for (p = w ; p != NULL ; p = p->_next) {
              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
              p->TState = ObjectWaiter::TS_ENTER ;
              p->_prev = q ;
              q = p ;
          }

          // Append the RATs to the EntryList
          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
          ObjectWaiter * Tail ;
          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
          if (Tail == NULL) {
              _EntryList = w ;
          } else {
              Tail->_next = w ;
              w->_prev = Tail ;
          }

          // Fall thru into code that tries to wake a successor from EntryList
      }

      if (QMode == 4 && _cxq != NULL) {
          // Aggressively drain cxq into EntryList at the first opportunity.
          // This policy ensures that recently-run threads live at the head of EntryList.

          // Drain _cxq into EntryList - bulk transfer.
          // First, detach _cxq.
          // The following loop is tantamount to: w = swap (&cxq, NULL)
          w = _cxq ;
          for (;;) {
             assert (w != NULL, "Invariant") ;
             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
             if (u == w) break ;
             w = u ;
          }
          assert (w != NULL              , "invariant") ;

          ObjectWaiter * q = NULL ;
          ObjectWaiter * p ;
          for (p = w ; p != NULL ; p = p->_next) {
              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
              p->TState = ObjectWaiter::TS_ENTER ;
              p->_prev = q ;
              q = p ;
          }

          // Prepend the RATs to the EntryList
          if (_EntryList != NULL) {
              q->_next = _EntryList ;
              _EntryList->_prev = q ;
          }
          _EntryList = w ;

          // Fall thru into code that tries to wake a successor from EntryList
      }

      w = _EntryList  ;
      if (w != NULL) {
          // I'd like to write: guarantee (w->_thread != Self).
          // But in practice an exiting thread may find itself on the EntryList.
          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
          // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
          // releases the lock "O".  T2 resumes immediately after the ST of null into
          // _owner, above.  T2 notices that the EntryList is populated, so it
          // reacquires the lock and then finds itself on the EntryList.
          // Given all that, we have to tolerate the circumstance where "w" is
          // associated with Self.
          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }

      // If we find that both _cxq and EntryList are null then just
      // re-run the exit protocol from the top.
      w = _cxq ;
      if (w == NULL) continue ;

      // Drain _cxq into EntryList - bulk transfer.
      // First, detach _cxq.
      // The following loop is tantamount to: w = swap (&cxq, NULL)
      for (;;) {
          assert (w != NULL, "Invariant") ;
          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
          if (u == w) break ;
          w = u ;
      }
      TEVENT (Inflated exit - drain cxq into EntryList) ;

      assert (w != NULL              , "invariant") ;
      assert (_EntryList  == NULL    , "invariant") ;

      // Convert the LIFO SLL anchored by _cxq into a DLL.
      // The list reorganization step operates in O(LENGTH(w)) time.
      // It's critical that this step operate quickly as
      // "Self" still holds the outer-lock, restricting parallelism
      // and effectively lengthening the critical section.
      // Invariant: s chases t chases u.
      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
      // we have faster access to the tail.

      if (QMode == 1) {
         // QMode == 1 : drain cxq to EntryList, reversing the order of the list.
         ObjectWaiter * s = NULL ;
         ObjectWaiter * t = w ;
         ObjectWaiter * u = NULL ;
         while (t != NULL) {
             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
             t->TState = ObjectWaiter::TS_ENTER ;
             u = t->_next ;
             t->_prev = u ;
             t->_next = s ;
             s = t;
             t = u ;
         }
         _EntryList  = s ;
         assert (s != NULL, "invariant") ;
      } else {
         // QMode == 0 or QMode == 2
         _EntryList = w ;
         ObjectWaiter * q = NULL ;
         ObjectWaiter * p ;
         for (p = w ; p != NULL ; p = p->_next) {
             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
             p->TState = ObjectWaiter::TS_ENTER ;
             p->_prev = q ;
             q = p ;
         }
      }

      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

      // See if we can abdicate to a spinner instead of waking a thread.
      // A primary goal of the implementation is to reduce the
      // context-switch rate.
      if (_succ != NULL) continue;

      w = _EntryList  ;
      if (w != NULL) {
          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
          ExitEpilog (Self, w) ;
          return ;
      }
   }
}
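
// The detach idiom used above -- "w = swap (&cxq, NULL)" -- is written as a
// CAS loop because cxq is a lock-free LIFO subject to concurrent prepends by
// entering threads.  Equivalent illustrative sketch:
//
//    w = _cxq ;
//    for (;;) {
//       ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
//       if (u == w) break ;   // detached the whole chain anchored at w
//       w = u ;               // someone prepended; retry with the new head
//    }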
       
// complete_exit() exits a lock, returning the recursion count.
// complete_exit/reenter operate as a wait without waiting.
// complete_exit requires an inflated monitor.
// The _owner field is not always the Thread addr even with an
// inflated monitor, e.g. the monitor can be inflated by a non-owning
// thread due to contention.
intptr_t ObjectMonitor::complete_exit(TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   DeferredInitialize();

   if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
       _recursions = 0 ;
       OwnerIsThread = 1 ;
    }
   }

   guarantee(Self == _owner, "complete_exit not owner");
   intptr_t save = _recursions; // record the old recursion count
   _recursions = 0;        // set the recursion level to be 0
   exit (Self) ;           // exit the monitor
   guarantee (_owner != Self, "invariant");
   return save;
}
       
// reenter() enters a lock and sets the recursion count.
// complete_exit/reenter operate as a wait without waiting.
void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   guarantee(_owner != Self, "reenter already owner");
   enter (THREAD);       // enter the monitor
   guarantee (_recursions == 0, "reenter recursion");
   _recursions = recursions;
   return;
}
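
// Taken together, complete_exit() and reenter() bracket a region in which
// the caller temporarily relinquishes the monitor -- the "wait without
// waiting" shape noted above.  Hypothetical caller, for illustration only:
//
//    intptr_t save = mon->complete_exit (Self) ;  // drop lock, capture count
//    ... work that must not be done while holding the monitor ...
//    mon->reenter (save, Self) ;                  // reacquire, restore count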
       
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit above
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
   Thread * const Self = THREAD ;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   DeferredInitialize () ;

   // Throw IMSX or IEX.
   CHECK_OWNER();

   // check for a pending interrupt
   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
     // post monitor waited event.  Note that this is past-tense, we are done waiting.
     if (JvmtiExport::should_post_monitor_waited()) {
        // Note: 'false' parameter is passed here because the
        // wait was not timed out due to thread interrupt.
        JvmtiExport::post_monitor_waited(jt, this, false);
     }
     TEVENT (Wait - Throw IEX) ;
     THROW(vmSymbols::java_lang_InterruptedException());
     return ;
   }
   TEVENT (Wait) ;

   assert (Self->_Stalled == 0, "invariant") ;
   Self->_Stalled = intptr_t(this) ;
   jt->set_current_waiting_monitor(this);

   // create a node to be put into the queue
   // Critically, after we reset() the event but prior to park(), we must check
   // for a pending interrupt.
   ObjectWaiter node(Self);
   node.TState = ObjectWaiter::TS_WAIT ;
   Self->_ParkEvent->reset() ;
   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag

   // Enter the waiting queue, which is a circular doubly linked list in this case
   // but it could be a priority queue or any data structure.
   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
   // by the owner of the monitor *except* in the case where park()
   // returns because of a timeout or interrupt.  Contention is exceptionally rare
   // so we use a simple spin-lock instead of a heavier-weight blocking lock.

   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
   AddWaiter (&node) ;
   Thread::SpinRelease (&_WaitSetLock) ;

   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }
   intptr_t save = _recursions; // record the old recursion count
   _waiters++;                  // increment the number of waiters
   _recursions = 0;             // set the recursion level to be 0
   exit (Self) ;                // exit the monitor
   guarantee (_owner != Self, "invariant") ;
       
   // As soon as the ObjectMonitor's ownership is dropped in the exit()
   // call above, another thread can enter() the ObjectMonitor, do the
   // notify(), and exit() the ObjectMonitor. If the other thread's
   // exit() call chooses this thread as the successor and the unpark()
   // call happens to occur while this thread is posting a
   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
   // handler using RawMonitors and consuming the unpark().
   //
   // To avoid the problem, we re-post the event. This does no harm
   // even if the original unpark() was not consumed because we are the
   // chosen successor for this monitor.
   if (node._notified != 0 && _succ == Self) {
      node._event->unpark();
   }

   // The thread is on the WaitSet list - now park() it.
   // On MP systems it's conceivable that a brief spin before we park
   // could be profitable.
   //
   // TODO-FIXME: change the following logic to a loop of the form
   //   while (!timeout && !interrupted && _notified == 0) park()

   int ret = OS_OK ;
   int WasNotified = 0 ;
   { // State transition wrappers
     OSThread* osthread = Self->osthread();
     OSThreadWaitState osts(osthread, true);
     {
       ThreadBlockInVM tbivm(jt);
       // Thread is in thread_blocked state and oop access is unsafe.
       jt->set_suspend_equivalent();

       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
           // Intentionally empty
       } else
       if (node._notified == 0) {
         if (millis <= 0) {
            Self->_ParkEvent->park () ;
         } else {
            ret = Self->_ParkEvent->park (millis) ;
         }
       }

       // were we externally suspended while we were waiting?
       if (ExitSuspendEquivalent (jt)) {
          // TODO-FIXME: add -- if succ == Self then succ = null.
          jt->java_suspend_self();
       }

     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm


     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
     // from the WaitSet to the EntryList.
     // See if we need to remove Node from the WaitSet.
     // We use double-checked locking to avoid grabbing _WaitSetLock
     // if the thread is not on the wait queue.
     //
     // Note that we don't need a fence before the fetch of TState.
     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
     // written by this thread. (perhaps the fetch might even be satisfied
     // by a look-aside into the processor's own store buffer, although given
     // the length of the code path between the prior ST and this load that's
     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
     // then we'll acquire the lock and then re-fetch a fresh TState value.
     // That is, we fail toward safety.

     if (node.TState == ObjectWaiter::TS_WAIT) {
         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
         if (node.TState == ObjectWaiter::TS_WAIT) {
            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
            assert(node._notified == 0, "invariant");
            node.TState = ObjectWaiter::TS_RUN ;
         }
         Thread::SpinRelease (&_WaitSetLock) ;
     }
       
     // The thread is now either off-list (TS_RUN),
     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
     // The Node's TState variable is stable from the perspective of this thread.
     // No other threads will asynchronously modify TState.
     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
     OrderAccess::loadload() ;
     if (_succ == Self) _succ = NULL ;
     WasNotified = node._notified ;

     // Reentry phase -- reacquire the monitor.
     // re-enter contended monitor after object.wait().
     // retain OBJECT_WAIT state until re-enter successfully completes
     // Thread state is thread_in_vm and oop access is again safe,
     // although the raw address of the object may have changed.
     // (Don't cache naked oops over safepoints, of course).

     // post monitor waited event. Note that this is past-tense, we are done waiting.
     if (JvmtiExport::should_post_monitor_waited()) {
       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
     }
     OrderAccess::fence() ;

     assert (Self->_Stalled != 0, "invariant") ;
     Self->_Stalled = 0 ;

     assert (_owner != Self, "invariant") ;
     ObjectWaiter::TStates v = node.TState ;
     if (v == ObjectWaiter::TS_RUN) {
         enter (Self) ;
     } else {
         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
         ReenterI (Self, &node) ;
         node.wait_reenter_end(this);
     }

     // Self has reacquired the lock.
     // Lifecycle - the node representing Self must not appear on any queues.
     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
     // want residual elements associated with this thread left on any lists.
     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
     assert    (_owner == Self, "invariant") ;
     assert    (_succ != Self , "invariant") ;
   } // OSThreadWaitState()

   jt->set_current_waiting_monitor(NULL);

   guarantee (_recursions == 0, "invariant") ;
   _recursions = save;     // restore the old recursion count
   _waiters--;             // decrement the number of waiters

   // Verify a few postconditions
   assert (_owner == Self       , "invariant") ;
   assert (_succ  != Self       , "invariant") ;
   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

   if (SyncFlags & 32) {
      OrderAccess::fence() ;
   }

   // check if the notification happened
   if (!WasNotified) {
     // no, it could be timeout or Thread.interrupt() or both
     // check for interrupt event, otherwise it is timeout
     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
       TEVENT (Wait - throw IEX from epilog) ;
       THROW(vmSymbols::java_lang_InterruptedException());
     }
   }

   // NOTE: a spurious wakeup will be treated as a timeout.
   // Monitor notify has precedence over thread interrupt.
}
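
// Condensed, the wait() protocol above is (illustrative sketch only):
//
//    node.TState = TS_WAIT ; reset ParkEvent ; fence ; re-check interrupt ;
//    SpinAcquire (WaitSetLock) ; AddWaiter (&node) ; SpinRelease (WaitSetLock) ;
//    save = _recursions ; _recursions = 0 ; exit (Self) ;   // fully drop the lock
//    park () until notified, timed out, or interrupted ;
//    if (node.TState == TS_WAIT) unlink node from the WaitSet ;  // double-checked
//    reacquire via enter() or ReenterI() ;
//    _recursions = save ;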
       
  4325 
       
  4326 
       
  4327 // Consider:
       
  4328 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
       
  4329 // then instead of transferring a thread from the WaitSet to the EntryList
       
  4330 // we might just dequeue a thread from the WaitSet and directly unpark() it.
       
  4331 
       
  4332 void ObjectMonitor::notify(TRAPS) {
       
  4333   CHECK_OWNER();
       
  4334   if (_WaitSet == NULL) {
       
  4335      TEVENT (Empty-Notify) ;
       
  4336      return ;
       
  4337   }
       
  4338   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
       
  4339 
       
  4340   int Policy = Knob_MoveNotifyee ;
       
  4341 
       
  4342   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
       
  4343   ObjectWaiter * iterator = DequeueWaiter() ;
       
  4344   if (iterator != NULL) {
       
  4345      TEVENT (Notify1 - Transfer) ;
       
  4346      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
       
  4347      guarantee (iterator->_notified == 0, "invariant") ;
       
  4348      // Disposition - what might we do with iterator ?
       
  4349      // a.  add it directly to the EntryList - either tail or head.
       
  4350      // b.  push it onto the front of the _cxq.
       
  4351      // For now we use (a).
       
  4352      if (Policy != 4) {
       
  4353         iterator->TState = ObjectWaiter::TS_ENTER ;
       
  4354      }
       
  4355      iterator->_notified = 1 ;
       
  4356 
       
  4357      ObjectWaiter * List = _EntryList ;
       
  4358      if (List != NULL) {
       
  4359         assert (List->_prev == NULL, "invariant") ;
       
  4360         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       
  4361         assert (List != iterator, "invariant") ;
       
  4362      }
       
  4363 
       
  4364      if (Policy == 0) {       // prepend to EntryList
       
  4365          if (List == NULL) {
       
  4366              iterator->_next = iterator->_prev = NULL ;
       
  4367              _EntryList = iterator ;
       
  4368          } else {
       
  4369              List->_prev = iterator ;
       
  4370              iterator->_next = List ;
       
  4371              iterator->_prev = NULL ;
       
  4372              _EntryList = iterator ;
       
  4373         }
       
  4374      } else
       
  4375      if (Policy == 1) {      // append to EntryList
       
  4376          if (List == NULL) {
       
  4377              iterator->_next = iterator->_prev = NULL ;
       
  4378              _EntryList = iterator ;
       
  4379          } else {
       
  4380             // CONSIDER:  finding the tail currently requires a linear-time walk of
       
  4381             // the EntryList.  We can make tail access constant-time by converting to
       
  4382             // a CDLL instead of using our current DLL.
       
            ObjectWaiter * Tail ;
            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
            Tail->_next = iterator ;
            iterator->_prev = Tail ;
            iterator->_next = NULL ;
        }
     } else
     if (Policy == 2) {      // prepend to cxq
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
            iterator->TState = ObjectWaiter::TS_CXQ ;
            for (;;) {
                ObjectWaiter * Front = _cxq ;
                iterator->_next = Front ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
                    break ;
                }
            }
         }
     } else
     if (Policy == 3) {      // append to cxq
        iterator->TState = ObjectWaiter::TS_CXQ ;
        for (;;) {
            ObjectWaiter * Tail ;
            Tail = _cxq ;
            if (Tail == NULL) {
                iterator->_next = NULL ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
                   break ;
                }
            } else {
                while (Tail->_next != NULL) Tail = Tail->_next ;
                Tail->_next = iterator ;
                iterator->_prev = Tail ;
                iterator->_next = NULL ;
                break ;
            }
        }
     } else {
        // Policy == 4 : no transfer -- mark the notifyee runnable and unpark() it
        ParkEvent * ev = iterator->_event ;
        iterator->TState = ObjectWaiter::TS_RUN ;
        OrderAccess::fence() ;
        ev->unpark() ;
     }

     if (Policy < 4) {
       iterator->wait_reenter_begin(this);
     }

     // _WaitSetLock protects the wait queue, not the EntryList.  We could
     // move the add-to-EntryList operation, above, outside the critical section
     // protected by _WaitSetLock.  In practice that's not useful.  With the
     // exception of wait() timeouts and interrupts, the monitor owner
     // is the only thread that grabs _WaitSetLock.  There's almost no contention
     // on _WaitSetLock, so it's not profitable to reduce the length of the
     // critical section.
  }

  Thread::SpinRelease (&_WaitSetLock) ;

  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
     ObjectSynchronizer::_sync_Notifications->inc() ;
  }
}
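
// For context: notify() is reached from Java-level Object.notify() once the
// receiver's monitor has been inflated -- i.e., lock.notify() inside a
// synchronized (lock) block ends up here (via ObjectSynchronizer) whenever
// there is a waiter to transfer off the WaitSet.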
       

void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();
  ObjectWaiter* iterator;
  if (_WaitSet == NULL) {
      TEVENT (Empty-NotifyAll) ;
      return ;
  }
  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);

  int Policy = Knob_MoveNotifyee ;
  int Tally = 0 ;
  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;

  for (;;) {
     iterator = DequeueWaiter () ;
     if (iterator == NULL) break ;
     TEVENT (NotifyAll - Transfer1) ;
     ++Tally ;

     // Disposition - what might we do with iterator ?
     // a.  add it directly to the EntryList - either tail or head.
     // b.  push it onto the front of the _cxq.
     // For now we use (a), exactly as in notify() above.
     //
     // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time
     // from the waitset to the EntryList.  This could be done more efficiently
     // with a single bulk transfer, but in practice it's not time-critical.
     // Beware, too, that in prepend-mode we invert the order of the waiters.
     // Let's say the waitset is "ABCD" and the EntryList is "XYZ".  After a
     // notifyAll() in prepend mode the waitset will be empty and the
     // EntryList will be "DCBAXYZ".
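     //
     // For illustration, the bulk-transfer idea would be (a sketch only,
     // assuming a waitset tail pointer were maintained -- it is not today):
     //   1. detach the entire _WaitSet chain in O(1) ;
     //   2. walk it once, setting TState = TS_ENTER and _notified = 1 ;
     //   3. splice the detached chain onto the _EntryList in O(1).
     // That is one pass total, instead of one DequeueWaiter()/enqueue
     // round-trip per waiter.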
       

     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
     guarantee (iterator->_notified == 0, "invariant") ;
     iterator->_notified = 1 ;
     if (Policy != 4) {
        iterator->TState = ObjectWaiter::TS_ENTER ;
     }

     ObjectWaiter * List = _EntryList ;
     if (List != NULL) {
        assert (List->_prev == NULL, "invariant") ;
        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (List != iterator, "invariant") ;
     }

     if (Policy == 0) {       // prepend to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
             List->_prev = iterator ;
             iterator->_next = List ;
             iterator->_prev = NULL ;
             _EntryList = iterator ;
         }
     } else
     if (Policy == 1) {      // append to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
            // CONSIDER:  finding the tail currently requires a linear-time walk
            // of the EntryList.  We could make tail access constant-time by
            // converting to a circular doubly linked list (CDLL) instead of
            // the current DLL.
            ObjectWaiter * Tail ;
            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
            Tail->_next = iterator ;
            iterator->_prev = Tail ;
            iterator->_next = NULL ;
        }
     } else
     if (Policy == 2) {      // prepend to cxq
         iterator->TState = ObjectWaiter::TS_CXQ ;
         for (;;) {
             ObjectWaiter * Front = _cxq ;
             iterator->_next = Front ;
             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
                 break ;
             }
         }
     } else
     if (Policy == 3) {      // append to cxq
        iterator->TState = ObjectWaiter::TS_CXQ ;
        for (;;) {
            ObjectWaiter * Tail ;
            Tail = _cxq ;
            if (Tail == NULL) {
                iterator->_next = NULL ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
                   break ;
                }
            } else {
                while (Tail->_next != NULL) Tail = Tail->_next ;
                Tail->_next = iterator ;
                iterator->_prev = Tail ;
                iterator->_next = NULL ;
                break ;
            }
        }
     } else {
        // Policy == 4 : no transfer -- mark the notifyee runnable and unpark() it
        ParkEvent * ev = iterator->_event ;
        iterator->TState = ObjectWaiter::TS_RUN ;
        OrderAccess::fence() ;
        ev->unpark() ;
     }

     if (Policy < 4) {
       iterator->wait_reenter_begin(this);
     }

     // _WaitSetLock protects the wait queue, not the EntryList.  We could
     // move the add-to-EntryList operation, above, outside the critical section
     // protected by _WaitSetLock.  In practice that's not useful.  With the
     // exception of wait() timeouts and interrupts, the monitor owner
     // is the only thread that grabs _WaitSetLock.  There's almost no contention
     // on _WaitSetLock, so it's not profitable to reduce the length of the
     // critical section.
  }

  Thread::SpinRelease (&_WaitSetLock) ;

  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
     ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
  }
}
       
// check_slow() is a misnomer.  It's called simply to throw an IMSX
// (IllegalMonitorStateException) exception.
// TODO-FIXME: remove check_slow() -- it's likely dead.

void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}
       

// -------------------------------------------------------------------------
// The raw monitor subsystem is entirely distinct from normal
// java-synchronization or jni-synchronization.  Raw monitors are not
// associated with objects.  They can be implemented in any manner
// that makes sense.  The original implementors decided to piggy-back
// the raw-monitor implementation on the existing Java objectMonitor mechanism.
// This flaw needs to be fixed.  We should reimplement raw monitors as a
// sui generis mechanism -- specifically, we should not implement raw
// monitors via Java monitors.  Time permitting, we should disentangle and
// deconvolve the two implementations and move the resulting raw monitor
// implementation over to the JVMTI directories.  Ideally, the raw monitor
// implementation would be built on top of park-unpark and nothing else.
//
// Raw monitors are used mainly by JVMTI.
// The raw monitor implementation borrows the ObjectMonitor structure,
// but the operators are degenerate and extremely simple.
//
// Mixed use of a single objectMonitor instance -- as both a raw monitor
// and a normal Java monitor -- is not permissible.
//
// Note that we use the single RawMonitor_lock to protect queue operations for
// _all_ raw monitors.  This is a scalability impediment, but since raw monitor
// usage is deprecated and rare, it is not of concern.  The RawMonitor_lock
// cannot be held indefinitely -- the critical sections must be short and
// bounded.
//
// -------------------------------------------------------------------------
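
// For context, a minimal JVMTI agent fragment (illustrative only; checking
// of the jvmtiError return values is omitted) showing the API calls that
// ultimately funnel into raw_enter()/raw_exit() below:
//
//   jrawMonitorID m ;
//   jvmti->CreateRawMonitor ("example", &m) ;
//   jvmti->RawMonitorEnter (m) ;
//   ... critical section ...
//   jvmti->RawMonitorExit (m) ;
//   jvmti->DestroyRawMonitor (m) ;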
       

int ObjectMonitor::SimpleEnter (Thread * Self) {
  for (;;) {
    // Fast path: try to swing _owner from NULL to Self.
    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
       return OS_OK ;
    }

    // Contended path: enqueue a stack-allocated node and park.
    ObjectWaiter Node (Self) ;
    Self->_ParkEvent->reset() ;     // strictly optional
    Node.TState = ObjectWaiter::TS_ENTER ;

    RawMonitor_lock->lock_without_safepoint_check() ;
    Node._next  = _EntryList ;
    _EntryList  = &Node ;
    OrderAccess::fence() ;
    // Re-check the owner to close the race with a concurrent SimpleExit().
    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
        _EntryList = Node._next ;
        RawMonitor_lock->unlock() ;
        return OS_OK ;
    }
    RawMonitor_lock->unlock() ;
    while (Node.TState == ObjectWaiter::TS_ENTER) {
       Self->_ParkEvent->park() ;
    }
  }
}
       

int ObjectMonitor::SimpleExit (Thread * Self) {
  guarantee (_owner == Self, "invariant") ;
  OrderAccess::release_store_ptr (&_owner, NULL) ;
  OrderAccess::fence() ;
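  // Why the fence matters -- an informal sketch, not a new protocol: the
  // release-store above publishes _owner == NULL, and the fence keeps the
  // _EntryList load below from floating above that store.  Without it,
  // this interleaving could strand a waiter:
  //   T1 (SimpleExit) : store _owner = NULL ; load _EntryList (stale NULL) ; return
  //   T2 (SimpleEnter): push Node on _EntryList ; re-check _owner ; CAS fails ; park
  // With both threads fencing between their store and subsequent load,
  // at least one of them must observe the other's update.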
       
  if (_EntryList == NULL) return OS_OK ;
  ObjectWaiter * w ;

  RawMonitor_lock->lock_without_safepoint_check() ;
  w = _EntryList ;
  if (w != NULL) {
      _EntryList = w->_next ;
  }
  RawMonitor_lock->unlock() ;
  if (w != NULL) {
      guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
      ParkEvent * ev = w->_event ;
      w->TState = ObjectWaiter::TS_RUN ;
      OrderAccess::fence() ;
      ev->unpark() ;
  }
  return OS_OK ;
}
       

int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
  guarantee (_owner == Self, "invariant") ;
  guarantee (_recursions == 0, "invariant") ;

  ObjectWaiter Node (Self) ;
  Node._notified = 0 ;
  Node.TState    = ObjectWaiter::TS_WAIT ;

  RawMonitor_lock->lock_without_safepoint_check() ;
  Node._next     = _WaitSet ;
  _WaitSet       = &Node ;
  RawMonitor_lock->unlock() ;

  SimpleExit (Self) ;
  guarantee (_owner != Self, "invariant") ;

  int ret = OS_OK ;
  if (millis <= 0) {
    Self->_ParkEvent->park();
  } else {
    ret = Self->_ParkEvent->park(millis);
  }

  // If the thread still resides on the waitset then unlink it.
  // Double-checked locking -- the usage is safe in this context
  // as TState is volatile and the lock-unlock operators are
  // serializing (barrier-equivalent).

  if (Node.TState == ObjectWaiter::TS_WAIT) {
    RawMonitor_lock->lock_without_safepoint_check() ;
    if (Node.TState == ObjectWaiter::TS_WAIT) {
      // Simple O(n) unlink, but performance isn't critical here.
      ObjectWaiter * p ;
      ObjectWaiter * q = NULL ;
      for (p = _WaitSet ; p != &Node; p = p->_next) {
         q = p ;
      }
      guarantee (p == &Node, "invariant") ;
      if (q == NULL) {
        guarantee (p == _WaitSet, "invariant") ;
        _WaitSet = p->_next ;
      } else {
        guarantee (p == q->_next, "invariant") ;
        q->_next = p->_next ;
      }
      Node.TState = ObjectWaiter::TS_RUN ;
    }
    RawMonitor_lock->unlock() ;
  }

  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
  SimpleEnter (Self) ;

  guarantee (_owner == Self, "invariant") ;
  guarantee (_recursions == 0, "invariant") ;
  return ret ;
}
       

int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
  guarantee (_owner == Self, "invariant") ;
  if (_WaitSet == NULL) return OS_OK ;

  // We have two options:
  // A. Transfer the threads from the WaitSet to the EntryList.
  // B. Remove the thread from the WaitSet and unpark() it.
  //
  // We use (B), which is crude and results in lots of futile
  // context switching.  In particular (B) induces lots of contention.
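  //
  // For illustration, option (A) would look roughly like the following
  // (a sketch only -- raw monitors deliberately keep the simpler (B)):
  //   unlink w from _WaitSet ;                  // under RawMonitor_lock
  //   w->TState = ObjectWaiter::TS_ENTER ;
  //   w->_next = _EntryList ; _EntryList = w ;
  // The notifyee would then be woken by SimpleExit() only when the lock
  // is actually available, avoiding the futile wakeup/park round-trip.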
       

  ParkEvent * ev = NULL ;       // consider using a small auto array ...
  RawMonitor_lock->lock_without_safepoint_check() ;
  for (;;) {
      ObjectWaiter * w = _WaitSet ;
      if (w == NULL) break ;
      _WaitSet = w->_next ;
      if (ev != NULL) { ev->unpark(); ev = NULL; }
      ev = w->_event ;
      OrderAccess::loadstore() ;
      w->TState = ObjectWaiter::TS_RUN ;
      OrderAccess::storeload();
      if (!All) break ;
  }
  RawMonitor_lock->unlock() ;
  if (ev != NULL) ev->unpark();
  return OS_OK ;
}
       

// Any JavaThread will enter here with state _thread_blocked
int ObjectMonitor::raw_enter(TRAPS) {
  TEVENT (raw_enter) ;
  void * Contended ;

  // Don't enter a raw monitor if the thread is being externally suspended --
  // it would surprise the suspender if a "suspended" thread could still
  // enter a monitor.
  JavaThread * jt = (JavaThread *)THREAD;
  if (THREAD->is_Java_thread()) {
    jt->SR_lock()->lock_without_safepoint_check();
    while (jt->is_external_suspend()) {
      jt->SR_lock()->unlock();
      jt->java_suspend_self();
      jt->SR_lock()->lock_without_safepoint_check();
    }
    // guarded by SR_lock to avoid racing with new external suspend requests.
    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
    jt->SR_lock()->unlock();
  } else {
    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
  }

  if (Contended == THREAD) {
     _recursions ++ ;
     return OM_OK ;
  }

  if (Contended == NULL) {
     guarantee (_owner == THREAD, "invariant") ;
     guarantee (_recursions == 0, "invariant") ;
     return OM_OK ;
  }

  THREAD->set_current_pending_monitor(this);

  if (!THREAD->is_Java_thread()) {
     // No non-Java threads besides the VM thread would acquire
     // a raw monitor.
     assert(THREAD->is_VM_thread(), "must be VM thread");
     SimpleEnter (THREAD) ;
  } else {
     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
     for (;;) {
       jt->set_suspend_equivalent();
       // cleared by handle_special_suspend_equivalent_condition() or
       // java_suspend_self()
       SimpleEnter (THREAD) ;

       // were we externally suspended while we were waiting?
       if (!jt->handle_special_suspend_equivalent_condition()) break ;

       // This thread was externally suspended.
       //
       // This logic isn't needed for JVMTI raw monitors, but doesn't hurt
       // just in case the suspend rules change.  It is needed for the
       // ObjectMonitor.wait() reentry phase: we have reentered the
       // contended monitor, but while we were waiting another thread
       // suspended us.  We don't want to hold the monitor while suspended
       // because that would surprise the thread that suspended us.
       //
       // Drop the lock --
       SimpleExit (THREAD) ;

       jt->java_suspend_self();
     }

     assert(_owner == THREAD, "Fatal error with monitor owner!");
     assert(_recursions == 0, "Fatal error with monitor recursions!");
  }

  THREAD->set_current_pending_monitor(NULL);
  guarantee (_recursions == 0, "invariant") ;
  return OM_OK;
}
       

// Used mainly for the JVMTI raw monitor implementation.
// Also used for ObjectMonitor::wait().
int ObjectMonitor::raw_exit(TRAPS) {
  TEVENT (raw_exit) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }
  if (_recursions > 0) {
    --_recursions ;
    return OM_OK ;
  }

  SimpleExit (THREAD) ;

  return OM_OK;
}
       

// Used for the JVMTI raw monitor implementation.
// All JavaThreads will enter here with state _thread_blocked.

int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
  TEVENT (raw_wait) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }

  // To avoid spurious wakeups we reset the parkevent -- this is strictly
  // optional.  The caller must be able to tolerate spurious returns from
  // raw_wait().
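  //
  // For illustration (a hypothetical caller, not product code), callers are
  // therefore expected to wrap raw_wait() in a condition re-check loop:
  //   while (!condition) {
  //     int rv = mon->raw_wait (timeout, true, THREAD) ;
  //     if (rv == OM_INTERRUPTED) break ;   // or propagate
  //   }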
       
  THREAD->_ParkEvent->reset() ;
  OrderAccess::fence() ;

  // check interrupt event
  if (interruptible && Thread::is_interrupted(THREAD, true)) {
    return OM_INTERRUPTED;
  }

  intptr_t save = _recursions ;
  _recursions = 0 ;
  _waiters ++ ;
  if (THREAD->is_Java_thread()) {
    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
    ((JavaThread *)THREAD)->set_suspend_equivalent();
  }
  int rv = SimpleWait (THREAD, millis) ;
  _recursions = save ;
  _waiters -- ;

  guarantee (THREAD == _owner, "invariant") ;
  if (THREAD->is_Java_thread()) {
     JavaThread * jSelf = (JavaThread *) THREAD ;
     for (;;) {
        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
        SimpleExit (THREAD) ;
        jSelf->java_suspend_self();
        SimpleEnter (THREAD) ;
        jSelf->set_suspend_equivalent() ;
     }
  }
  guarantee (THREAD == _owner, "invariant") ;

  if (interruptible && Thread::is_interrupted(THREAD, true)) {
    return OM_INTERRUPTED;
  }
  return OM_OK ;
}
       

int ObjectMonitor::raw_notify(TRAPS) {
  TEVENT (raw_notify) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }
  SimpleNotify (THREAD, false) ;
  return OM_OK;
}

int ObjectMonitor::raw_notifyAll(TRAPS) {
  TEVENT (raw_notifyAll) ;
  if (THREAD != _owner) {
    return OM_ILLEGAL_MONITOR_STATE;
  }
  SimpleNotify (THREAD, true) ;
  return OM_OK;
}
       

#ifndef PRODUCT
void ObjectMonitor::verify() {
}

void ObjectMonitor::print() {
}
#endif
       

//------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT