src/hotspot/share/runtime/synchronizer.cpp
branch: datagramsocketimpl-branch
changeset: 58678:9cf78a70fa4f
parent: 54786:ebf733a324d4
child: 58679:9c3209ff7550
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
// Global ObjectMonitor free list. Newly allocated and deflated
// ObjectMonitors are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
// Global ObjectMonitor in-use list. When a JavaThread is exiting,
// ObjectMonitors on its per-thread in-use list are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list

static volatile intptr_t gListLock = 0;   // protects global monitor lists
static volatile int g_om_free_count = 0;  // # on g_free_list
static volatile int g_om_population = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    assert(m->object() == obj, "invariant");
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
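    // Purely as a hedged, rough illustration of that check order (owns_obj()
    // and is_biased_toward() are hypothetical helpers, not code from this file):
    //
    //   bool owns_obj(Thread* t, oop o, markWord m, BasicLock* bl) {
    //     if (m.has_bias_pattern()) return is_biased_toward(m, t);          // 1: biased
    //     if (m.has_locker()) return t->is_lock_owned((address)m.locker()); // 2: stack-lock
    //     if (bl->displaced_header().value() == 0) return true;             // 3: recursive
    //     return m.has_monitor() && m.monitor()->is_entered(t);             // 4: inflated
    //   }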
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
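// A hedged usage sketch of the tail of that sequence (lock1, lock2 and
// lock2_lock are hypothetical Handles/BasicLocks, not names from this file;
// the elided steps 1-3 give up lock1 and wait):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // give up lock1 fully
//   // ... wait or work without holding lock1 ...
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                 // step 4: restore recursions
//   ObjectSynchronizer::enter(lock2, &lock2_lock, THREAD);           // step 5: take lock2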
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}
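
// Hedged note: these two functions back the JNI MonitorEnter/MonitorExit
// entry points, whose spec-defined signatures are:
//
//   jint MonitorEnter(JNIEnv* env, jobject obj);  // maps to jni_enter()
//   jint MonitorExit(JNIEnv* env, jobject obj);   // maps to jni_exit()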
   439 
   415 
   440 // -----------------------------------------------------------------------------
   416 // -----------------------------------------------------------------------------
   441 // Internal VM locks on java objects
   417 // Internal VM locks on java objects
   442 // standard constructor, allows locking failures
   418 // standard constructor, allows locking failures
   443 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
   419 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
   444   _dolock = doLock;
   420   _dolock = do_lock;
   445   _thread = thread;
   421   _thread = thread;
   446   debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
   422   _thread->check_for_valid_safepoint_state();
   447   _obj = obj;
   423   _obj = obj;
   448 
   424 
   449   if (_dolock) {
   425   if (_dolock) {
   450     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
   426     ObjectSynchronizer::enter(_obj, &_lock, _thread);
   451   }
   427   }
   452 }
   428 }
   453 
   429 
   454 ObjectLocker::~ObjectLocker() {
   430 ObjectLocker::~ObjectLocker() {
   455   if (_dolock) {
   431   if (_dolock) {
   456     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
   432     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
   457   }
   433   }
   458 }
   434 }


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}


struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};
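
// A hedged illustration of the same padding idiom for a single hot counter
// (PaddedCounter is hypothetical; DEFAULT_CACHE_LINE_SIZE is from padded.hpp):
//
//   struct PaddedCounter {
//     char pad_prefix[DEFAULT_CACHE_LINE_SIZE];               // isolate from neighbors
//     volatile int value;                                     // the sole hot field
//     char pad_suffix[DEFAULT_CACHE_LINE_SIZE - sizeof(int)]; // fill out the line
//   };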

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

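// A hedged sketch of the Phi-based diffusion listed above (this exact mode
// is not among the hashCode settings implemented in get_next_hash() below;
// phi_hash() is a hypothetical name):
//
//   static inline intptr_t phi_hash(oop obj, int stw_random) {
//     uintptr_t v = (cast_from_oop<uintptr_t>(obj) >> 3) * 2654435761u;
//     return (intptr_t)(v ^ (uintptr_t)stw_random);  // golden-ratio multiply, then mix
//   }
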
static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markWord temp, test;
  intptr_t hash;
  markWord mark = read_stable_mark(obj);

  // object should remain ineligible for biased locking
  assert(!mark.has_bias_pattern(), "invariant");

  if (mark.is_neutral()) {
    hash = mark.hash();               // this is a normal header
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(self, obj);  // allocate a new hash code
    temp = mark.copy_set_hash(hash);  // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark.has_monitor()) {
    monitor = mark.monitor();
    temp = monitor->header();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (self->is_lock_owned((address)mark.locker())) {
    temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();                  // by current thread, check if the displaced
    if (hash != 0) {                     // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header in the BasicLock on a thread's stack
    // is strictly immutable. It CANNOT be changed in ANY case.
    // during an inflate() call so any change to that stack memory
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set hash code
  monitor = inflate(self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  hash = mark.hash();
  if (hash == 0) {
    hash = get_next_hash(self, obj);
    temp = mark.copy_set_hash(hash); // merge hash code into header
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
    test = markWord(v);
    if (test != mark) {
      // The only non-deflation update to the ObjectMonitor's
      // header/dmw field is to merge in the hash code. If someone
      // adds a new usage of the header/dmw field, please update
      // this code.
      hash = test.hash();
      assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
   791 
   769 
   792 
   770 
   793 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
   771 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
   794                                                    Handle h_obj) {
   772                                                    Handle h_obj) {
   795   if (UseBiasedLocking) {
   773   if (UseBiasedLocking) {
   796     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
   774     BiasedLocking::revoke(h_obj, thread);
   797     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   775     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
   798   }
   776   }
   799 
   777 
   800   assert(thread == JavaThread::current(), "Can only be called on current thread");
   778   assert(thread == JavaThread::current(), "Can only be called on current thread");
   801   oop obj = h_obj();
   779   oop obj = h_obj();
   802 
   780 
   803   markOop mark = ReadStableMark(obj);
   781   markWord mark = read_stable_mark(obj);
   804 
   782 
   805   // Uncontended case, header points to stack
   783   // Uncontended case, header points to stack
   806   if (mark->has_locker()) {
   784   if (mark.has_locker()) {
   807     return thread->is_lock_owned((address)mark->locker());
   785     return thread->is_lock_owned((address)mark.locker());
   808   }
   786   }
   809   // Contended case, header points to ObjectMonitor (tagged pointer)
   787   // Contended case, header points to ObjectMonitor (tagged pointer)
   810   if (mark->has_monitor()) {
   788   if (mark.has_monitor()) {
   811     ObjectMonitor* monitor = mark->monitor();
   789     ObjectMonitor* monitor = mark.monitor();
   812     return monitor->is_entered(thread) != 0;
   790     return monitor->is_entered(thread) != 0;
   813   }
   791   }
   814   // Unlocked case, header in place
   792   // Unlocked case, header in place
   815   assert(mark->is_neutral(), "sanity check");
   793   assert(mark.is_neutral(), "sanity check");
   816   return false;
   794   return false;
   817 }
   795 }
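
current_thread_holds_lock() is the first of several queries below that decode the mark word in the same fixed order: stack-locked, then inflated, then neutral. That dispatch, as a minimal sketch with illustrative tag constants (the real bit assignments belong to markWord):

    // Sketch only: the decode order shared by the ownership queries here.
    #include <cstdint>

    enum LockState { kStackLocked, kInflated, kNeutral };

    LockState classify(uintptr_t mark) {
      const uintptr_t kTagMask      = 0x3;  // low lock bits (illustrative)
      const uintptr_t kLockedValue  = 0x0;  // stack-locked pattern
      const uintptr_t kMonitorValue = 0x2;  // inflated: tagged monitor pointer
      if ((mark & kTagMask) == kLockedValue)  return kStackLocked;
      if ((mark & kTagMask) == kMonitorValue) return kInflated;
      return kNeutral;                      // unlocked, header in place
    }
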
   818 
   796 
   819 // Be aware that this method can revoke the bias of the lock object.
   797 // Be aware that this method can revoke the bias of the lock object.
   820 // This method queries the ownership of the lock handle specified by 'h_obj'.
   798 // This method queries the ownership of the lock handle specified by 'h_obj'.
   828   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   806   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   829   assert(self->thread_state() != _thread_blocked, "invariant");
   807   assert(self->thread_state() != _thread_blocked, "invariant");
   830 
   808 
   831   // Possible mark states: neutral, biased, stack-locked, inflated
   809   // Possible mark states: neutral, biased, stack-locked, inflated
   832 
   810 
   833   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
   811   if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
   834     // CASE: biased
   812     // CASE: biased
   835     BiasedLocking::revoke_and_rebias(h_obj, false, self);
   813     BiasedLocking::revoke(h_obj, self);
   836     assert(!h_obj->mark()->has_bias_pattern(),
   814     assert(!h_obj->mark().has_bias_pattern(),
   837            "biases should be revoked by now");
   815            "biases should be revoked by now");
   838   }
   816   }
   839 
   817 
   840   assert(self == JavaThread::current(), "Can only be called on current thread");
   818   assert(self == JavaThread::current(), "Can only be called on current thread");
   841   oop obj = h_obj();
   819   oop obj = h_obj();
   842   markOop mark = ReadStableMark(obj);
   820   markWord mark = read_stable_mark(obj);
   843 
   821 
   844   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
   822   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
   845   if (mark->has_locker()) {
   823   if (mark.has_locker()) {
   846     return self->is_lock_owned((address)mark->locker()) ?
   824     return self->is_lock_owned((address)mark.locker()) ?
   847       owner_self : owner_other;
   825       owner_self : owner_other;
   848   }
   826   }
   849 
   827 
   850   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
   828   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
   851   // The Object:ObjectMonitor relationship is stable as long as we're
   829   // The Object:ObjectMonitor relationship is stable as long as we're
   852   // not at a safepoint.
   830   // not at a safepoint.
   853   if (mark->has_monitor()) {
   831   if (mark.has_monitor()) {
   854     void * owner = mark->monitor()->_owner;
   832     void* owner = mark.monitor()->_owner;
   855     if (owner == NULL) return owner_none;
   833     if (owner == NULL) return owner_none;
   856     return (owner == self ||
   834     return (owner == self ||
   857             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
   835             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
   858   }
   836   }
   859 
   837 
   860   // CASE: neutral
   838   // CASE: neutral
   861   assert(mark->is_neutral(), "sanity check");
   839   assert(mark.is_neutral(), "sanity check");
   862   return owner_none;           // it's unlocked
   840   return owner_none;           // it's unlocked
   863 }
   841 }
   864 
   842 
   865 // FIXME: jvmti should call this
   843 // FIXME: jvmti should call this
   866 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
   844 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
   867   if (UseBiasedLocking) {
   845   if (UseBiasedLocking) {
   868     if (SafepointSynchronize::is_at_safepoint()) {
   846     if (SafepointSynchronize::is_at_safepoint()) {
   869       BiasedLocking::revoke_at_safepoint(h_obj);
   847       BiasedLocking::revoke_at_safepoint(h_obj);
   870     } else {
   848     } else {
   871       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
   849       BiasedLocking::revoke(h_obj, JavaThread::current());
   872     }
   850     }
   873     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   851     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
   874   }
   852   }
   875 
   853 
   876   oop obj = h_obj();
   854   oop obj = h_obj();
   877   address owner = NULL;
   855   address owner = NULL;
   878 
   856 
   879   markOop mark = ReadStableMark(obj);
   857   markWord mark = read_stable_mark(obj);
   880 
   858 
   881   // Uncontended case, header points to stack
   859   // Uncontended case, header points to stack
   882   if (mark->has_locker()) {
   860   if (mark.has_locker()) {
   883     owner = (address) mark->locker();
   861     owner = (address) mark.locker();
   884   }
   862   }
   885 
   863 
   886   // Contended case, header points to ObjectMonitor (tagged pointer)
   864   // Contended case, header points to ObjectMonitor (tagged pointer)
   887   else if (mark->has_monitor()) {
   865   else if (mark.has_monitor()) {
   888     ObjectMonitor* monitor = mark->monitor();
   866     ObjectMonitor* monitor = mark.monitor();
   889     assert(monitor != NULL, "monitor should be non-null");
   867     assert(monitor != NULL, "monitor should be non-null");
   890     owner = (address) monitor->owner();
   868     owner = (address) monitor->owner();
   891   }
   869   }
   892 
   870 
   893   if (owner != NULL) {
   871   if (owner != NULL) {
   896   }
   874   }
   897 
   875 
   898   // Unlocked case, header in place
   876   // Unlocked case, header in place
   899   // Cannot have assertion since this object may have been
   877   // Cannot have assertion since this object may have been
   900   // locked by another thread when reaching here.
   878   // locked by another thread when reaching here.
   901   // assert(mark->is_neutral(), "sanity check");
   879   // assert(mark.is_neutral(), "sanity check");
   902 
   880 
   903   return NULL;
   881   return NULL;
   904 }
   882 }
   905 
   883 
   906 // Visitors ...
   884 // Visitors ...
   907 
   885 
   908 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
   886 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
   909   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
   887   PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
   910   while (block != NULL) {
   888   while (block != NULL) {
   911     assert(block->object() == CHAINMARKER, "must be a block header");
   889     assert(block->object() == CHAINMARKER, "must be a block header");
   912     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
   890     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
   913       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
   891       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
   914       oop object = (oop)mid->object();
   892       oop object = (oop)mid->object();
   915       if (object != NULL) {
   893       if (object != NULL) {
       
   894         // Only process with closure if the object is set.
   916         closure->do_monitor(mid);
   895         closure->do_monitor(mid);
   917       }
   896       }
   918     }
   897     }
   919     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
   898     block = (PaddedObjectMonitor*)block->_next_om;
   920   }
   899   }
   921 }
       
   922 
       
   923 // Get the next block in the block list.
       
   924 static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
       
   925   assert(block->object() == CHAINMARKER, "must be a block header");
       
   926   block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
       
   927   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
       
   928   return block;
       
   929 }
   900 }
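
monitors_iterate() hands every in-use ObjectMonitor in every allocated block to a MonitorClosure. As a usage illustration, a hypothetical closure that tallies currently-owned monitors might look like this (MonitorClosure and ObjectMonitor are the HotSpot types; the counting class is invented):

    // Sketch only: counting owned monitors through the visitor interface.
    class CountOwnedMonitors : public MonitorClosure {
      int _count;
     public:
      CountOwnedMonitors() : _count(0) {}
      void do_monitor(ObjectMonitor* mid) {
        if (mid->owner() != NULL) {  // the associated object is locked
          _count++;
        }
      }
      int count() const { return _count; }
    };
    // Typical use, at a safepoint where the population is stable:
    //   CountOwnedMonitors c;
    //   ObjectSynchronizer::monitors_iterate(&c);
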
   930 
   901 
   931 static bool monitors_used_above_threshold() {
   902 static bool monitors_used_above_threshold() {
   932   if (gMonitorPopulation == 0) {
   903   if (g_om_population == 0) {
   933     return false;
   904     return false;
   934   }
   905   }
   935   int monitors_used = gMonitorPopulation - gMonitorFreeCount;
   906   int monitors_used = g_om_population - g_om_free_count;
   936   int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
   907   int monitor_usage = (monitors_used * 100LL) / g_om_population;
   937   return monitor_usage > MonitorUsedDeflationThreshold;
   908   return monitor_usage > MonitorUsedDeflationThreshold;
   938 }
   909 }
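
As a worked example: with g_om_population == 1000 and g_om_free_count == 80, monitors_used is 920 and monitor_usage is (920 * 100) / 1000 == 92, which exceeds the default MonitorUsedDeflationThreshold of 90, so the function reports that deflation is warranted. The 100LL literal widens the multiplication to 64 bits so it cannot overflow int for very large monitor populations.
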
   939 
   910 
   940 bool ObjectSynchronizer::is_cleanup_needed() {
   911 bool ObjectSynchronizer::is_cleanup_needed() {
   941   if (MonitorUsedDeflationThreshold > 0) {
   912   if (MonitorUsedDeflationThreshold > 0) {
   950   global_used_oops_do(f);
   921   global_used_oops_do(f);
   951 }
   922 }
   952 
   923 
   953 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   924 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   954   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   925   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   955   list_oops_do(gOmInUseList, f);
   926   list_oops_do(g_om_in_use_list, f);
   956 }
   927 }
   957 
   928 
   958 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
   929 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
   959   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   930   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   960   list_oops_do(thread->omInUseList, f);
   931   list_oops_do(thread->om_in_use_list, f);
   961 }
   932 }
   962 
   933 
   963 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
   934 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
   964   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   935   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   965   ObjectMonitor* mid;
   936   ObjectMonitor* mid;
   966   for (mid = list; mid != NULL; mid = mid->FreeNext) {
   937   for (mid = list; mid != NULL; mid = mid->_next_om) {
   967     if (mid->object() != NULL) {
   938     if (mid->object() != NULL) {
   968       f->do_oop((oop*)mid->object_addr());
   939       f->do_oop((oop*)mid->object_addr());
   969     }
   940     }
   970   }
   941   }
   971 }
   942 }
   972 
   943 
   973 
   944 
   974 // -----------------------------------------------------------------------------
   945 // -----------------------------------------------------------------------------
   975 // ObjectMonitor Lifecycle
   946 // ObjectMonitor Lifecycle
   976 // -----------------------
   947 // -----------------------
   977 // Inflation unlinks monitors from the global gFreeList and
   948 // Inflation unlinks monitors from the global g_free_list and
   978 // associates them with objects.  Deflation -- which occurs at
   949 // associates them with objects.  Deflation -- which occurs at
   979 // STW-time -- disassociates idle monitors from objects.  Such
   950 // STW-time -- disassociates idle monitors from objects.  Such
   980 // scavenged monitors are returned to the gFreeList.
   951 // scavenged monitors are returned to the g_free_list.
   981 //
   952 //
   982 // The global list is protected by gListLock.  All the critical sections
   953 // The global list is protected by gListLock.  All the critical sections
   983 // are short and operate in constant-time.
   954 // are short and operate in constant-time.
   984 //
   955 //
   985 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
   956 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
   986 //
   957 //
   987 // Lifecycle:
   958 // Lifecycle:
   988 // --   unassigned and on the global free list
   959 // --   unassigned and on the global free list
   989 // --   unassigned and on a thread's private omFreeList
   960 // --   unassigned and on a thread's private om_free_list
   990 // --   assigned to an object.  The object is inflated and the mark refers
   961 // --   assigned to an object.  The object is inflated and the mark refers
   991 //      to the objectmonitor.
   962 //      to the objectmonitor.
   992 
   963 
   993 
   964 
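
For reference, the three lifecycle states listed above can be written out explicitly; HotSpot itself tracks the state implicitly through list membership and the object field, so this enum is purely an editorial sketch:

    // Sketch only: lifecycle states implied by the comment above.
    enum class OmState {
      kGlobalFree,   // unassigned, linked on g_free_list
      kThreadFree,   // unassigned, linked on a thread's om_free_list
      kInUse         // assigned: the object's mark encodes this monitor
    };
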
   994 // Constraining monitor pool growth via MonitorBound ...
   965 // Constraining monitor pool growth via MonitorBound ...
       
   966 //
       
   967 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
   995 //
   968 //
   996 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
   969 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
   997 // the rate of scavenging is driven primarily by GC.  As such, we can find
   970 // the rate of scavenging is driven primarily by GC.  As such, we can find
   998 // an inordinate number of monitors in circulation.
   971 // an inordinate number of monitors in circulation.
   999 // To avoid that scenario we can artificially induce a STW safepoint
   972 // To avoid that scenario we can artificially induce a STW safepoint
  1004 // we could just loop. In addition, if MonitorBound is set to a low value
   977 // we could just loop. In addition, if MonitorBound is set to a low value
  1005 // we'll incur more safepoints, which are harmful to performance.
   978 // we'll incur more safepoints, which are harmful to performance.
  1006 // See also: GuaranteedSafepointInterval
   979 // See also: GuaranteedSafepointInterval
  1007 //
   980 //
  1008 // The current implementation uses asynchronous VM operations.
   981 // The current implementation uses asynchronous VM operations.
  1009 
   982 //
  1010 static void InduceScavenge(Thread * Self, const char * Whence) {
   983 // If MonitorBound is set, the boundary applies to
       
   984 //     (g_om_population - g_om_free_count)
       
   985 // i.e., if there are not enough ObjectMonitors on the global free list,
       
   986 // then a safepoint deflation is induced. Picking a good MonitorBound value
       
   987 // is non-trivial.
       
   988 
       
   989 static void InduceScavenge(Thread* self, const char * Whence) {
  1011   // Induce STW safepoint to trim monitors
   990   // Induce STW safepoint to trim monitors
  1012   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
   991   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  1013   // More precisely, trigger an asynchronous STW safepoint as the number
   992   // More precisely, trigger an asynchronous STW safepoint as the number
  1014   // of active monitors passes the specified threshold.
   993   // of active monitors passes the specified threshold.
  1015   // TODO: assert thread state is reasonable
   994   // TODO: assert thread state is reasonable
  1021     // The VMThread will delete the op when completed.
  1000     // The VMThread will delete the op when completed.
  1022     VMThread::execute(new VM_ScavengeMonitors());
  1001     VMThread::execute(new VM_ScavengeMonitors());
  1023   }
  1002   }
  1024 }
  1003 }
  1025 
  1004 
  1026 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  1005 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  1027   // A large MAXPRIVATE value reduces both list lock contention
  1006   // A large MAXPRIVATE value reduces both list lock contention
  1028   // and list coherency traffic, but also tends to increase the
  1007   // and list coherency traffic, but also tends to increase the
  1029   // number of objectMonitors in circulation as well as the STW
  1008   // number of ObjectMonitors in circulation as well as the STW
  1030   // scavenge costs.  As usual, we lean toward time in space-time
  1009   // scavenge costs.  As usual, we lean toward time in space-time
  1031   // tradeoffs.
  1010   // tradeoffs.
  1032   const int MAXPRIVATE = 1024;
  1011   const int MAXPRIVATE = 1024;
       
  1012   stringStream ss;
  1033   for (;;) {
  1013   for (;;) {
  1034     ObjectMonitor * m;
  1014     ObjectMonitor* m;
  1035 
  1015 
  1036     // 1: try to allocate from the thread's local omFreeList.
  1016     // 1: try to allocate from the thread's local om_free_list.
  1037     // Threads will attempt to allocate first from their local list, then
  1017     // Threads will attempt to allocate first from their local list, then
  1038     // from the global list, and only after those attempts fail will the thread
  1018     // from the global list, and only after those attempts fail will the thread
  1039     // attempt to instantiate new monitors.   Thread-local free lists take
  1019     // attempt to instantiate new monitors.   Thread-local free lists take
  1040     // heat off the gListLock and improve allocation latency, as well as reducing
  1020     // heat off the gListLock and improve allocation latency, as well as reducing
  1041     // coherency traffic on the shared global list.
  1021     // coherency traffic on the shared global list.
  1042     m = Self->omFreeList;
  1022     m = self->om_free_list;
  1043     if (m != NULL) {
  1023     if (m != NULL) {
  1044       Self->omFreeList = m->FreeNext;
  1024       self->om_free_list = m->_next_om;
  1045       Self->omFreeCount--;
  1025       self->om_free_count--;
  1046       guarantee(m->object() == NULL, "invariant");
  1026       guarantee(m->object() == NULL, "invariant");
  1047       m->FreeNext = Self->omInUseList;
  1027       m->_next_om = self->om_in_use_list;
  1048       Self->omInUseList = m;
  1028       self->om_in_use_list = m;
  1049       Self->omInUseCount++;
  1029       self->om_in_use_count++;
  1050       return m;
  1030       return m;
  1051     }
  1031     }
  1052 
  1032 
  1053     // 2: try to allocate from the global gFreeList
  1033     // 2: try to allocate from the global g_free_list
  1054     // CONSIDER: use muxTry() instead of muxAcquire().
  1034     // CONSIDER: use muxTry() instead of muxAcquire().
  1055     // If the muxTry() fails then drop immediately into case 3.
  1035     // If the muxTry() fails then drop immediately into case 3.
  1056     // If we're using thread-local free lists then try
  1036     // If we're using thread-local free lists then try
  1057     // to reprovision the caller's free list.
  1037     // to reprovision the caller's free list.
  1058     if (gFreeList != NULL) {
  1038     if (g_free_list != NULL) {
  1059       // Reprovision the thread's omFreeList.
  1039       // Reprovision the thread's om_free_list.
  1060       // Use bulk transfers to reduce the allocation rate and heat
  1040       // Use bulk transfers to reduce the allocation rate and heat
  1061       // on various locks.
  1041       // on various locks.
  1062       Thread::muxAcquire(&gListLock, "omAlloc(1)");
  1042       Thread::muxAcquire(&gListLock, "om_alloc(1)");
  1063       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
  1043       for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
  1064         gMonitorFreeCount--;
  1044         g_om_free_count--;
  1065         ObjectMonitor * take = gFreeList;
  1045         ObjectMonitor* take = g_free_list;
  1066         gFreeList = take->FreeNext;
  1046         g_free_list = take->_next_om;
  1067         guarantee(take->object() == NULL, "invariant");
  1047         guarantee(take->object() == NULL, "invariant");
  1068         guarantee(!take->is_busy(), "invariant");
       
  1069         take->Recycle();
  1048         take->Recycle();
  1070         omRelease(Self, take, false);
  1049         om_release(self, take, false);
  1071       }
  1050       }
  1072       Thread::muxRelease(&gListLock);
  1051       Thread::muxRelease(&gListLock);
  1073       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
  1052       self->om_free_provision += 1 + (self->om_free_provision/2);
  1074       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
  1053       if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
  1075 
  1054 
  1076       const int mx = MonitorBound;
  1055       const int mx = MonitorBound;
  1077       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
  1056       if (mx > 0 && (g_om_population-g_om_free_count) > mx) {
  1078         // We can't safely induce a STW safepoint from omAlloc() as our thread
  1057         // Not enough ObjectMonitors on the global free list.
       
  1058         // We can't safely induce a STW safepoint from om_alloc() as our thread
  1079         // state may not be appropriate for such activities and callers may hold
  1059         // state may not be appropriate for such activities and callers may hold
  1080         // naked oops, so instead we defer the action.
  1060         // naked oops, so instead we defer the action.
  1081         InduceScavenge(Self, "omAlloc");
  1061         InduceScavenge(self, "om_alloc");
  1082       }
  1062       }
  1083       continue;
  1063       continue;
  1084     }
  1064     }
  1085 
  1065 
  1086     // 3: allocate a block of new ObjectMonitors
  1066     // 3: allocate a block of new ObjectMonitors
  1087     // Both the local and global free lists are empty -- resort to malloc().
  1067     // Both the local and global free lists are empty -- resort to malloc().
  1088     // In the current implementation objectMonitors are TSM - immortal.
  1068     // In the current implementation ObjectMonitors are TSM - immortal.
  1089     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
  1069     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
  1090     // each ObjectMonitor to start at the beginning of a cache line,
  1070     // each ObjectMonitor to start at the beginning of a cache line,
  1091     // so we use align_up().
  1071     // so we use align_up().
  1092     // A better solution would be to use C++ placement-new.
  1072     // A better solution would be to use C++ placement-new.
  1093     // BEWARE: As it stands currently, we don't run the ctors!
  1073     // BEWARE: As it stands currently, we don't run the ctors!
  1094     assert(_BLOCKSIZE > 1, "invariant");
  1074     assert(_BLOCKSIZE > 1, "invariant");
  1095     size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
  1075     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
  1096     PaddedEnd<ObjectMonitor> * temp;
  1076     PaddedObjectMonitor* temp;
  1097     size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
  1077     size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
  1098     void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
  1078     void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
  1099                                                       mtInternal);
  1079     temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
  1100     temp = (PaddedEnd<ObjectMonitor> *)
       
  1101              align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
       
  1102 
       
  1103     // NOTE: (almost) no way to recover if allocation failed.
       
  1104     // We might be able to induce a STW safepoint and scavenge enough
       
  1105     // objectMonitors to permit progress.
       
  1106     if (temp == NULL) {
       
  1107       vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
       
  1108                             "Allocate ObjectMonitors");
       
  1109     }
       
  1110     (void)memset((void *) temp, 0, neededsize);
  1080     (void)memset((void *) temp, 0, neededsize);
  1111 
  1081 
  1112     // Format the block.
  1082     // Format the block.
  1113     // Initialize the linked list; each monitor points to its next,
  1083     // Initialize the linked list; each monitor points to its next,
  1114     // forming the singly-linked free list. The very first monitor
  1084     // forming the singly-linked free list. The very first monitor
  1115     // will point to the next block, which forms the block list.
  1085     // will point to the next block, which forms the block list.
  1116     // The trick of using the 1st element in the block as gBlockList
  1086     // The trick of using the 1st element in the block as g_block_list
  1117     // linkage should be reconsidered.  A better implementation would
  1087     // linkage should be reconsidered.  A better implementation would
  1118     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
  1088     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
  1119 
  1089 
  1120     for (int i = 1; i < _BLOCKSIZE; i++) {
  1090     for (int i = 1; i < _BLOCKSIZE; i++) {
  1121       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
  1091       temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
  1122     }
  1092     }
  1123 
  1093 
  1124     // terminate the last monitor as the end of list
  1094     // terminate the last monitor as the end of list
  1125     temp[_BLOCKSIZE - 1].FreeNext = NULL;
  1095     temp[_BLOCKSIZE - 1]._next_om = NULL;
  1126 
  1096 
  1127     // Element [0] is reserved for global list linkage
  1097     // Element [0] is reserved for global list linkage
  1128     temp[0].set_object(CHAINMARKER);
  1098     temp[0].set_object(CHAINMARKER);
  1129 
  1099 
  1130     // Consider carving out this thread's current request from the
  1100     // Consider carving out this thread's current request from the
  1131     // block in hand.  This avoids some lock traffic and redundant
  1101     // block in hand.  This avoids some lock traffic and redundant
  1132     // list activity.
  1102     // list activity.
  1133 
  1103 
  1134     // Acquire the gListLock to manipulate gBlockList and gFreeList.
  1104     // Acquire the gListLock to manipulate g_block_list and g_free_list.
  1135     // An Oyama-Taura-Yonezawa scheme might be more efficient.
  1105     // An Oyama-Taura-Yonezawa scheme might be more efficient.
  1136     Thread::muxAcquire(&gListLock, "omAlloc(2)");
  1106     Thread::muxAcquire(&gListLock, "om_alloc(2)");
  1137     gMonitorPopulation += _BLOCKSIZE-1;
  1107     g_om_population += _BLOCKSIZE-1;
  1138     gMonitorFreeCount += _BLOCKSIZE-1;
  1108     g_om_free_count += _BLOCKSIZE-1;
  1139 
  1109 
  1140     // Add the new block to the list of extant blocks (gBlockList).
  1110     // Add the new block to the list of extant blocks (g_block_list).
  1141     // The very first objectMonitor in a block is reserved and dedicated.
  1111     // The very first ObjectMonitor in a block is reserved and dedicated.
  1142     // It serves as blocklist "next" linkage.
  1112     // It serves as blocklist "next" linkage.
  1143     temp[0].FreeNext = gBlockList;
  1113     temp[0]._next_om = g_block_list;
  1144     // There are lock-free uses of gBlockList so make sure that
  1114     // There are lock-free uses of g_block_list so make sure that
  1145     // the previous stores happen before we update gBlockList.
  1115     // the previous stores happen before we update g_block_list.
  1146     OrderAccess::release_store(&gBlockList, temp);
  1116     OrderAccess::release_store(&g_block_list, temp);
  1147 
  1117 
  1148     // Add the new string of objectMonitors to the global free list
  1118     // Add the new string of ObjectMonitors to the global free list
  1149     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
  1119     temp[_BLOCKSIZE - 1]._next_om = g_free_list;
  1150     gFreeList = temp + 1;
  1120     g_free_list = temp + 1;
  1151     Thread::muxRelease(&gListLock);
  1121     Thread::muxRelease(&gListLock);
  1152   }
  1122   }
  1153 }
  1123 }
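
om_alloc() is a three-tier allocator: pop from the thread-local free list; otherwise bulk-refill that list from the global one under gListLock; otherwise malloc a fresh block and donate most of it to the global list. The same tiering, reduced to a self-contained sketch with invented names and sizes (not the HotSpot code):

    // Sketch only: a tiered free-list allocator in the shape of om_alloc().
    #include <mutex>

    struct Mon { Mon* next = nullptr; };

    struct MonPool {
      Mon*       local  = nullptr;        // per-thread list: lock-free to use
      Mon*       global = nullptr;        // shared list, guarded by 'lock'
      std::mutex lock;
      static const int kProvision = 32;   // bulk-refill size (invented)
      static const int kBlockSize = 128;  // monitors per allocated block

      static Mon* pop(Mon*& list) {
        Mon* m = list;
        if (m != nullptr) list = m->next;
        return m;
      }

      Mon* alloc() {
        if (Mon* m = pop(local)) return m;        // tier 1: local list
        {
          std::lock_guard<std::mutex> g(lock);    // tier 2: refill locally
          for (int i = 0; i < kProvision && global != nullptr; i++) {
            Mon* m = pop(global);
            m->next = local;
            local = m;
          }
        }
        if (Mon* m = pop(local)) return m;
        Mon* block = new Mon[kBlockSize];         // tier 3: grow the pool
        for (int i = 1; i < kBlockSize - 1; i++) {
          block[i].next = &block[i + 1];          // chain elements 1..N-1
        }
        block[kBlockSize - 1].next = nullptr;
        local = &block[1];                        // [0] reserved, as above
        return pop(local);
      }
    };
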
  1154 
  1124 
  1155 // Place "m" on the caller's private per-thread omFreeList.
  1125 // Place "m" on the caller's private per-thread om_free_list.
  1156 // In practice there's no need to clamp or limit the number of
  1126 // In practice there's no need to clamp or limit the number of
  1157 // monitors on a thread's omFreeList as the only time we'll call
  1127 // monitors on a thread's om_free_list as the only non-allocation time
  1158 // omRelease is to return a monitor to the free list after a CAS
  1128 // we'll call om_release() is to return a monitor to the free list after
  1159 // attempt failed.  This doesn't allow unbounded #s of monitors to
  1129 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
  1160 // accumulate on a thread's free list.
  1130 // accumulate on a thread's free list.
  1161 //
  1131 //
  1162 // Key constraint: all ObjectMonitors on a thread's free list and the global
  1132 // Key constraint: all ObjectMonitors on a thread's free list and the global
  1163 // free list must have their object field set to null. This prevents the
  1133 // free list must have their object field set to null. This prevents the
  1164 // scavenger -- deflate_monitor_list() -- from reclaiming them.
  1134 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
  1165 
  1135 // are trying to release them.
  1166 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
  1136 
  1167                                    bool fromPerThreadAlloc) {
  1137 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
  1168   guarantee(m->header() == NULL, "invariant");
  1138                                     bool from_per_thread_alloc) {
       
  1139   guarantee(m->header().value() == 0, "invariant");
  1169   guarantee(m->object() == NULL, "invariant");
  1140   guarantee(m->object() == NULL, "invariant");
  1170   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  1141   stringStream ss;
  1171   // Remove from omInUseList
  1142   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
  1172   if (fromPerThreadAlloc) {
  1143             "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
       
  1144             m->_recursions);
       
  1145   // _next_om is used for both per-thread in-use and free lists so
       
  1146   // we have to remove 'm' from the in-use list first (as needed).
       
  1147   if (from_per_thread_alloc) {
       
  1148     // Need to remove 'm' from om_in_use_list.
  1173     ObjectMonitor* cur_mid_in_use = NULL;
  1149     ObjectMonitor* cur_mid_in_use = NULL;
  1174     bool extracted = false;
  1150     bool extracted = false;
  1175     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
  1151     for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
  1176       if (m == mid) {
  1152       if (m == mid) {
  1177         // extract from per-thread in-use list
  1153         // extract from per-thread in-use list
  1178         if (mid == Self->omInUseList) {
  1154         if (mid == self->om_in_use_list) {
  1179           Self->omInUseList = mid->FreeNext;
  1155           self->om_in_use_list = mid->_next_om;
  1180         } else if (cur_mid_in_use != NULL) {
  1156         } else if (cur_mid_in_use != NULL) {
  1181           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
  1157           cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
  1182         }
  1158         }
  1183         extracted = true;
  1159         extracted = true;
  1184         Self->omInUseCount--;
  1160         self->om_in_use_count--;
  1185         break;
  1161         break;
  1186       }
  1162       }
  1187     }
  1163     }
  1188     assert(extracted, "Should have extracted from in-use list");
  1164     assert(extracted, "Should have extracted from in-use list");
  1189   }
  1165   }
  1190 
  1166 
  1191   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  1167   m->_next_om = self->om_free_list;
  1192   m->FreeNext = Self->omFreeList;
  1168   self->om_free_list = m;
  1193   Self->omFreeList = m;
  1169   self->om_free_count++;
  1194   Self->omFreeCount++;
  1170 }
  1195 }
  1171 
  1196 
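
The extraction loop above is a standard singly-linked-list unlink with a trailing cursor (cur_mid_in_use plays the role of the 'prev' pointer). In isolation, over a generic node type, it reads:

    // Sketch only: unlink 'target' from a NULL-terminated singly-linked list.
    struct Node { Node* next; };

    bool unlink(Node*& head, Node* target) {
      Node* prev = nullptr;
      for (Node* cur = head; cur != nullptr; prev = cur, cur = cur->next) {
        if (cur == target) {
          if (cur == head) {
            head = cur->next;        // unlink at the head
          } else {
            prev->next = cur->next;  // bypass 'target' mid-list
          }
          return true;
        }
      }
      return false;                  // 'target' was not on the list
    }
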
  1172 // Return ObjectMonitors on a moribund thread's free and in-use
  1197 // Return the monitors of a moribund thread's local free list to
  1173 // lists to the appropriate global lists. The ObjectMonitors on the
  1198 // the global free list.  Typically a thread calls omFlush() when
  1174 // per-thread in-use list may still be in use by other threads.
  1199 // it's dying.  We could also consider having the VM thread steal
       
  1200 // monitors from threads that have not run java code over a few
       
  1201 // consecutive STW safepoints.  Relatedly, we might decay
       
  1202 // omFreeProvision at STW safepoints.
       
  1203 //
  1175 //
  1204 // Also return the monitors of a moribund thread's omInUseList to
  1176 // We currently call om_flush() from Threads::remove() before the
  1205 // a global gOmInUseList under the global list lock so these
  1177 // thread has been excised from the thread list and is no longer a
  1206 // will continue to be scanned.
  1178 // mutator. This means that om_flush() cannot run concurrently with
  1207 //
  1179 // a safepoint and interleave with deflate_idle_monitors(). In
  1208 // We currently call omFlush() from Threads::remove() _before the thread
  1180 // particular, this ensures that the thread's in-use monitors are
  1209 // has been excised from the thread list and is no longer a mutator.
  1181 // scanned by a GC safepoint, either via Thread::oops_do() (before
  1210 // This means that omFlush() cannot run concurrently with a safepoint and
  1182 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
  1211 // interleave with the deflate_idle_monitors scavenge operator. In particular,
  1183 // om_flush() is called).
  1212 // this ensures that the thread's monitors are scanned by a GC safepoint,
  1184 
  1213 // either via Thread::oops_do() (if safepoint happens before omFlush()) or via
  1185 void ObjectSynchronizer::om_flush(Thread* self) {
  1214 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
  1186   ObjectMonitor* free_list = self->om_free_list;
  1215 // monitors have been transferred to the global in-use list).
  1187   ObjectMonitor* free_tail = NULL;
  1216 
  1188   int free_count = 0;
  1217 void ObjectSynchronizer::omFlush(Thread * Self) {
  1189   if (free_list != NULL) {
  1218   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  1190     ObjectMonitor* s;
  1219   ObjectMonitor * tail = NULL;
  1191     // The thread is going away. Set 'free_tail' to the last per-thread free
  1220   int tally = 0;
  1192     // monitor which will be linked to g_free_list below under the gListLock.
  1221   if (list != NULL) {
  1193     stringStream ss;
  1222     ObjectMonitor * s;
  1194     for (s = free_list; s != NULL; s = s->_next_om) {
  1223     // The thread is going away, the per-thread free monitors
  1195       free_count++;
  1224     // are freed via set_owner(NULL)
  1196       free_tail = s;
  1225     // Link them to tail, which will be linked into the global free list
       
  1226     // gFreeList below, under the gListLock
       
  1227     for (s = list; s != NULL; s = s->FreeNext) {
       
  1228       tally++;
       
  1229       tail = s;
       
  1230       guarantee(s->object() == NULL, "invariant");
  1197       guarantee(s->object() == NULL, "invariant");
  1231       guarantee(!s->is_busy(), "invariant");
  1198       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
  1232       s->set_owner(NULL);   // redundant but good hygiene
  1199     }
  1233     }
  1200     guarantee(free_tail != NULL, "invariant");
  1234     guarantee(tail != NULL, "invariant");
  1201     assert(self->om_free_count == free_count, "free-count off");
  1235     assert(Self->omFreeCount == tally, "free-count off");
  1202     self->om_free_list = NULL;
  1236     Self->omFreeList = NULL;
  1203     self->om_free_count = 0;
  1237     Self->omFreeCount = 0;
  1204   }
  1238   }
  1205 
  1239 
  1206   ObjectMonitor* in_use_list = self->om_in_use_list;
  1240   ObjectMonitor * inUseList = Self->omInUseList;
  1207   ObjectMonitor* in_use_tail = NULL;
  1241   ObjectMonitor * inUseTail = NULL;
  1208   int in_use_count = 0;
  1242   int inUseTally = 0;
  1209   if (in_use_list != NULL) {
  1243   if (inUseList != NULL) {
  1210     // The thread is going away, however the ObjectMonitors on the
       
  1211     // om_in_use_list may still be in-use by other threads. Link
       
  1212     // them to in_use_tail, which will be linked into the global
       
  1213     // in-use list g_om_in_use_list below, under the gListLock.
  1244     ObjectMonitor *cur_om;
  1214     ObjectMonitor *cur_om;
  1245     // The thread is going away, however the omInUseList inflated
  1215     for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
  1246     // monitors may still be in-use by other threads.
  1216       in_use_tail = cur_om;
  1247     // Link them to inUseTail, which will be linked into the global in-use list
  1217       in_use_count++;
  1248     // gOmInUseList below, under the gListLock
  1218     }
  1249     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
  1219     guarantee(in_use_tail != NULL, "invariant");
  1250       inUseTail = cur_om;
  1220     assert(self->om_in_use_count == in_use_count, "in-use count off");
  1251       inUseTally++;
  1221     self->om_in_use_list = NULL;
  1252     }
  1222     self->om_in_use_count = 0;
  1253     guarantee(inUseTail != NULL, "invariant");
  1223   }
  1254     assert(Self->omInUseCount == inUseTally, "in-use count off");
  1224 
  1255     Self->omInUseList = NULL;
  1225   Thread::muxAcquire(&gListLock, "om_flush");
  1256     Self->omInUseCount = 0;
  1226   if (free_tail != NULL) {
  1257   }
  1227     free_tail->_next_om = g_free_list;
  1258 
  1228     g_free_list = free_list;
  1259   Thread::muxAcquire(&gListLock, "omFlush");
  1229     g_om_free_count += free_count;
  1260   if (tail != NULL) {
  1230   }
  1261     tail->FreeNext = gFreeList;
  1231 
  1262     gFreeList = list;
  1232   if (in_use_tail != NULL) {
  1263     gMonitorFreeCount += tally;
  1233     in_use_tail->_next_om = g_om_in_use_list;
  1264   }
  1234     g_om_in_use_list = in_use_list;
  1265 
  1235     g_om_in_use_count += in_use_count;
  1266   if (inUseTail != NULL) {
       
  1267     inUseTail->FreeNext = gOmInUseList;
       
  1268     gOmInUseList = inUseList;
       
  1269     gOmInUseCount += inUseTally;
       
  1270   }
  1236   }
  1271 
  1237 
  1272   Thread::muxRelease(&gListLock);
  1238   Thread::muxRelease(&gListLock);
  1273 
  1239 
  1274   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1240   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1275   LogStreamHandle(Info, monitorinflation) lsh_info;
  1241   LogStreamHandle(Info, monitorinflation) lsh_info;
  1276   LogStream * ls = NULL;
  1242   LogStream* ls = NULL;
  1277   if (log_is_enabled(Debug, monitorinflation)) {
  1243   if (log_is_enabled(Debug, monitorinflation)) {
  1278     ls = &lsh_debug;
  1244     ls = &lsh_debug;
  1279   } else if ((tally != 0 || inUseTally != 0) &&
  1245   } else if ((free_count != 0 || in_use_count != 0) &&
  1280              log_is_enabled(Info, monitorinflation)) {
  1246              log_is_enabled(Info, monitorinflation)) {
  1281     ls = &lsh_info;
  1247     ls = &lsh_info;
  1282   }
  1248   }
  1283   if (ls != NULL) {
  1249   if (ls != NULL) {
  1284     ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
  1250     ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
  1285                  ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
  1251                  ", in_use_count=%d" ", om_free_provision=%d",
  1286                  p2i(Self), tally, inUseTally, Self->omFreeProvision);
  1252                  p2i(self), free_count, in_use_count, self->om_free_provision);
  1287   }
  1253   }
  1288 }
  1254 }
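
Both transfers in om_flush() are O(1) splices: each local list is walked once, outside the lock, to find its tail and verify its count, and then under gListLock the tail is pointed at the current global head and the global head is swung to the local head. The splice step alone, as a generic sketch:

    // Sketch only: prepend an entire local list onto a shared global list.
    struct Node { Node* next; };

    void splice_onto_global(Node*& global_head, int& global_count,
                            Node* local_head, Node* local_tail,
                            int local_count) {
      if (local_head == nullptr) return;   // nothing to transfer
      // Caller must hold the lock guarding global_head and global_count.
      local_tail->next = global_head;      // chain the global list behind ours
      global_head = local_head;            // publish the combined list
      global_count += local_count;
    }
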
  1289 
  1255 
  1290 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
  1256 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
  1291                                        const oop obj,
  1257                                        const oop obj,
  1298   event->commit();
  1264   event->commit();
  1299 }
  1265 }
  1300 
  1266 
  1301 // Fast path code shared by multiple functions
  1267 // Fast path code shared by multiple functions
  1302 void ObjectSynchronizer::inflate_helper(oop obj) {
  1268 void ObjectSynchronizer::inflate_helper(oop obj) {
  1303   markOop mark = obj->mark();
  1269   markWord mark = obj->mark();
  1304   if (mark->has_monitor()) {
  1270   if (mark.has_monitor()) {
  1305     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
  1271     assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
  1306     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
  1272     assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
  1307     return;
  1273     return;
  1308   }
  1274   }
  1309   inflate(Thread::current(), obj, inflate_cause_vm_internal);
  1275   inflate(Thread::current(), obj, inflate_cause_vm_internal);
  1310 }
  1276 }
  1311 
  1277 
  1312 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
  1278 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
  1313                                            oop object,
  1279                                            oop object,
  1314                                            const InflateCause cause) {
  1280                                            const InflateCause cause) {
  1315   // Inflate mutates the heap ...
  1281   // Inflate mutates the heap ...
  1316   // Relaxing assertion for bug 6320749.
  1282   // Relaxing assertion for bug 6320749.
  1317   assert(Universe::verify_in_progress() ||
  1283   assert(Universe::verify_in_progress() ||
  1318          !SafepointSynchronize::is_at_safepoint(), "invariant");
  1284          !SafepointSynchronize::is_at_safepoint(), "invariant");
  1319 
  1285 
  1320   EventJavaMonitorInflate event;
  1286   EventJavaMonitorInflate event;
  1321 
  1287 
  1322   for (;;) {
  1288   for (;;) {
  1323     const markOop mark = object->mark();
  1289     const markWord mark = object->mark();
  1324     assert(!mark->has_bias_pattern(), "invariant");
  1290     assert(!mark.has_bias_pattern(), "invariant");
  1325 
  1291 
  1326     // The mark can be in one of the following states:
  1292     // The mark can be in one of the following states:
  1327     // *  Inflated     - just return
  1293     // *  Inflated     - just return
  1328     // *  Stack-locked - coerce it to inflated
  1294     // *  Stack-locked - coerce it to inflated
  1329     // *  INFLATING    - busy wait for conversion to complete
  1295     // *  INFLATING    - busy wait for conversion to complete
  1330     // *  Neutral      - aggressively inflate the object.
  1296     // *  Neutral      - aggressively inflate the object.
  1331     // *  BIASED       - Illegal.  We should never see this
  1297     // *  BIASED       - Illegal.  We should never see this
  1332 
  1298 
  1333     // CASE: inflated
  1299     // CASE: inflated
  1334     if (mark->has_monitor()) {
  1300     if (mark.has_monitor()) {
  1335       ObjectMonitor * inf = mark->monitor();
  1301       ObjectMonitor* inf = mark.monitor();
  1336       markOop dmw = inf->header();
  1302       markWord dmw = inf->header();
  1337       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
  1303       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
  1338       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
  1304       assert(inf->object() == object, "invariant");
  1339       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
  1305       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
  1340       return inf;
  1306       return inf;
  1341     }
  1307     }
  1342 
  1308 
  1343     // CASE: inflation in progress - inflating over a stack-lock.
  1309     // CASE: inflation in progress - inflating over a stack-lock.
  1344     // Some other thread is converting from stack-locked to inflated.
  1310     // Some other thread is converting from stack-locked to inflated.
  1345     // Only that thread can complete inflation -- other threads must wait.
  1311     // Only that thread can complete inflation -- other threads must wait.
  1346     // The INFLATING value is transient.
  1312     // The INFLATING value is transient.
  1347     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
  1313     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
  1348     // We could always eliminate polling by parking the thread on some auxiliary list.
  1314     // We could always eliminate polling by parking the thread on some auxiliary list.
  1349     if (mark == markOopDesc::INFLATING()) {
  1315     if (mark == markWord::INFLATING()) {
  1350       ReadStableMark(object);
  1316       read_stable_mark(object);
  1351       continue;
  1317       continue;
  1352     }
  1318     }
  1353 
  1319 
  1354     // CASE: stack-locked
  1320     // CASE: stack-locked
  1355     // Could be stack-locked either by this thread or by some other thread.
  1321     // Could be stack-locked either by this thread or by some other thread.
  1364     // We now use per-thread private objectmonitor free lists.
  1330     // We now use per-thread private objectmonitor free lists.
  1365     // These lists are reprovisioned from the global free list outside the
  1331     // These lists are reprovisioned from the global free list outside the
  1366     // critical INFLATING...ST interval.  A thread can transfer
  1332     // critical INFLATING...ST interval.  A thread can transfer
  1367     // multiple objectmonitors en masse from the global free list to its local free list.
  1333     // multiple objectmonitors en masse from the global free list to its local free list.
  1368     // This reduces coherency traffic and lock contention on the global free list.
  1334     // This reduces coherency traffic and lock contention on the global free list.
  1369     // Using such local free lists, it doesn't matter if the omAlloc() call appears
  1335     // Using such local free lists, it doesn't matter if the om_alloc() call appears
  1370     // before or after the CAS(INFLATING) operation.
  1336     // before or after the CAS(INFLATING) operation.
  1371     // See the comments in omAlloc().
  1337     // See the comments in om_alloc().
  1372 
  1338 
  1373     LogStreamHandle(Trace, monitorinflation) lsh;
  1339     LogStreamHandle(Trace, monitorinflation) lsh;
  1374 
  1340 
  1375     if (mark->has_locker()) {
  1341     if (mark.has_locker()) {
  1376       ObjectMonitor * m = omAlloc(Self);
  1342       ObjectMonitor* m = om_alloc(self);
  1377       // Optimistically prepare the objectmonitor - anticipate successful CAS
  1343       // Optimistically prepare the objectmonitor - anticipate successful CAS
  1378       // We do this before the CAS in order to minimize the length of time
  1344       // We do this before the CAS in order to minimize the length of time
  1379       // in which INFLATING appears in the mark.
  1345       // in which INFLATING appears in the mark.
  1380       m->Recycle();
  1346       m->Recycle();
  1381       m->_Responsible  = NULL;
  1347       m->_Responsible  = NULL;
  1382       m->_recursions   = 0;
       
  1383       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
  1348       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
  1384 
  1349 
  1385       markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
  1350       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
  1386       if (cmp != mark) {
  1351       if (cmp != mark) {
  1387         omRelease(Self, m, true);
  1352         om_release(self, m, true);
  1388         continue;       // Interference -- just retry
  1353         continue;       // Interference -- just retry
  1389       }
  1354       }
  1390 
  1355 
  1391       // We've successfully installed INFLATING (0) into the mark-word.
  1356       // We've successfully installed INFLATING (0) into the mark-word.
  1392       // This is the only case where 0 will appear in a mark-word.
  1357       // This is the only case where 0 will appear in a mark-word.
  1395       //
  1360       //
  1396       // Why do we CAS a 0 into the mark-word instead of just CASing the
  1361       // Why do we CAS a 0 into the mark-word instead of just CASing the
  1397       // mark-word from the stack-locked value directly to the new inflated state?
  1362       // mark-word from the stack-locked value directly to the new inflated state?
  1398       // Consider what happens when a thread unlocks a stack-locked object.
  1363       // Consider what happens when a thread unlocks a stack-locked object.
  1399       // It attempts to use CAS to swing the displaced header value from the
  1364       // It attempts to use CAS to swing the displaced header value from the
  1400       // on-stack basiclock back into the object header.  Recall also that the
  1365       // on-stack BasicLock back into the object header.  Recall also that the
  1401       // header value (hash code, etc) can reside in (a) the object header, or
  1366       // header value (hash code, etc) can reside in (a) the object header, or
  1402       // (b) a displaced header associated with the stack-lock, or (c) a displaced
  1367       // (b) a displaced header associated with the stack-lock, or (c) a displaced
  1403       // header in an objectMonitor.  The inflate() routine must copy the header
  1368       // header in an ObjectMonitor.  The inflate() routine must copy the header
  1404       // value from the basiclock on the owner's stack to the objectMonitor, all
  1369       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
  1405       // the while preserving the hashCode stability invariants.  If the owner
  1370       // the while preserving the hashCode stability invariants.  If the owner
  1406       // decides to release the lock while the value is 0, the unlock will fail
  1371       // decides to release the lock while the value is 0, the unlock will fail
  1407       // and control will eventually pass from slow_exit() to inflate.  The owner
  1372       // and control will eventually pass from slow_exit() to inflate.  The owner
  1408       // will then spin, waiting for the 0 value to disappear.   Put another way,
  1373       // will then spin, waiting for the 0 value to disappear.   Put another way,
  1409       // the 0 causes the owner to stall if the owner happens to try to
  1374       // the 0 causes the owner to stall if the owner happens to try to
  1410       // drop the lock (restoring the header from the basiclock to the object)
  1375       // drop the lock (restoring the header from the BasicLock to the object)
  1411       // while inflation is in-progress.  This protocol avoids races that
  1376       // while inflation is in-progress.  This protocol avoids races that
  1412       // might otherwise permit hashCode values to change or "flicker" for an object.
  1377       // might otherwise permit hashCode values to change or "flicker" for an object.
  1413       // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
  1378       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
  1414       // 0 serves as a "BUSY" inflate-in-progress indicator.
  1379       // 0 serves as a "BUSY" inflate-in-progress indicator.
  1415 
  1380 
  1416 
  1381 
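      // [Editorial sketch, not HotSpot code] The protocol described above,
      // compressed into three steps over the object's mark M:
      //   1) CAS M: stack-locked value -> INFLATING (0); on failure, retry
      //      the outer loop, since another thread moved first.
      //   2) With M == 0 the owner is pinned: copy the displaced header from
      //      the owner's BasicLock into the monitor, then set owner/object.
      //   3) Release-store M = encode(monitor), making the 0 disappear.
      // Any thread that observes M == 0 spins/yields until step 3 completes,
      // which is why the hash and other header bits never "flicker".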
  1417       // fetch the displaced mark from the owner's stack.
  1382       // fetch the displaced mark from the owner's stack.
  1418       // The owner can't die or unwind past the lock while our INFLATING
  1383       // The owner can't die or unwind past the lock while our INFLATING
  1419       // object is in the mark.  Furthermore the owner can't complete
  1384       // object is in the mark.  Furthermore the owner can't complete
  1420       // an unlock on the object, either.
  1385       // an unlock on the object, either.
  1421       markOop dmw = mark->displaced_mark_helper();
  1386       markWord dmw = mark.displaced_mark_helper();
  1422       // Catch if the object's header is not neutral (not locked and
  1387       // Catch if the object's header is not neutral (not locked and
  1423       // not marked is what we care about here).
  1388       // not marked is what we care about here).
  1424       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
  1389       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
  1425 
  1390 
  1426       // Setup monitor fields to proper values -- prepare the monitor
  1391       // Setup monitor fields to proper values -- prepare the monitor
  1427       m->set_header(dmw);
  1392       m->set_header(dmw);
  1428 
  1393 
  1429       // Optimization: if the mark->locker stack address is associated
  1394       // Optimization: if the mark.locker stack address is associated
  1430       // with this thread we could simply set m->_owner = Self.
  1395       // with this thread we could simply set m->_owner = self.
  1431       // Note that a thread can inflate an object
  1396       // Note that a thread can inflate an object
  1432       // that it has stack-locked -- as might happen in wait() -- directly
  1397       // that it has stack-locked -- as might happen in wait() -- directly
  1433       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
  1398       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
  1434       m->set_owner(mark->locker());
  1399       m->set_owner(mark.locker());
  1435       m->set_object(object);
  1400       m->set_object(object);
  1436       // TODO-FIXME: assert BasicLock->dhw != 0.
  1401       // TODO-FIXME: assert BasicLock->dhw != 0.
  1437 
  1402 
  1438       // Must preserve store ordering. The monitor state must
  1403       // Must preserve store ordering. The monitor state must
  1439       // be stable at the time of publishing the monitor address.
  1404       // be stable at the time of publishing the monitor address.
  1440       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
  1405       guarantee(object->mark() == markWord::INFLATING(), "invariant");
  1441       object->release_set_mark(markOopDesc::encode(m));
  1406       object->release_set_mark(markWord::encode(m));
  1442 
  1407 
  1443       // Hopefully the performance counters are allocated on distinct cache lines
  1408       // Hopefully the performance counters are allocated on distinct cache lines
  1444       // to avoid false sharing on MP systems ...
  1409       // to avoid false sharing on MP systems ...
  1445       OM_PERFDATA_OP(Inflations, inc());
  1410       OM_PERFDATA_OP(Inflations, inc());
  1446       if (log_is_enabled(Trace, monitorinflation)) {
  1411       if (log_is_enabled(Trace, monitorinflation)) {
  1447         ResourceMark rm(Self);
  1412         ResourceMark rm(self);
  1448         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
  1413         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
  1449                      INTPTR_FORMAT ", type='%s'", p2i(object),
  1414                      INTPTR_FORMAT ", type='%s'", p2i(object),
  1450                      p2i(object->mark()), object->klass()->external_name());
  1415                      object->mark().value(), object->klass()->external_name());
  1451       }
  1416       }
  1452       if (event.should_commit()) {
  1417       if (event.should_commit()) {
  1453         post_monitor_inflate_event(&event, object, cause);
  1418         post_monitor_inflate_event(&event, object, cause);
  1454       }
  1419       }
  1455       return m;
  1420       return m;
  1456     }
  1421     }
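// Editor's note: the stack-lock branch above, condensed from the
// new-revision code (a reading aid, not part of the changeset). The
// object's mark has already been CASed to INFLATING(), and 'dmw' is
// the displaced mark word saved by the stack locker:
//
//   m->set_header(dmw);                 // preserve the displaced header
//   m->set_owner(mark.locker());        // owner = BasicLock on owner's stack
//   m->set_object(object);
//   guarantee(object->mark() == markWord::INFLATING(), "invariant");
//   object->release_set_mark(markWord::encode(m));  // publish monitor last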
  1457 
  1422 
  1458     // CASE: neutral
  1423     // CASE: neutral
  1459     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
  1424     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
  1460     // If we know we're inflating for entry it's better to inflate by swinging a
  1425     // If we know we're inflating for entry it's better to inflate by swinging a
  1461     // pre-locked objectMonitor pointer into the object header.   A successful
  1426     // pre-locked ObjectMonitor pointer into the object header.   A successful
  1462     // CAS inflates the object *and* confers ownership to the inflating thread.
  1427     // CAS inflates the object *and* confers ownership to the inflating thread.
  1463     // In the current implementation we use a 2-step mechanism where we CAS()
  1428     // In the current implementation we use a 2-step mechanism where we CAS()
  1464     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
  1429     // to inflate and then CAS() again to try to swing _owner from NULL to self.
  1465     // An inflateTry() method that we could call from fast_enter() and slow_enter()
  1430     // An inflateTry() method that we could call from enter() would be useful.
  1466     // would be useful.
       
  1467 
  1431 
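// Editor's note: a minimal sketch of the inflateTry() idea mentioned
// above -- hypothetical, not part of this changeset. A single CAS both
// inflates and confers ownership, instead of the 2-step CAS-then-CAS
// mechanism the code below uses (names are from this file):
//
//   ObjectMonitor* m = om_alloc(self);
//   m->Recycle();
//   m->set_header(mark);       // neutral mark becomes the displaced header
//   m->set_owner(self);        // pre-lock the monitor for the caller
//   m->set_object(object);
//   if (object->cas_set_mark(markWord::encode(m), mark) == mark) {
//     return m;                // inflated *and* entered in one CAS
//   }
//   // interference: undo and fall back to the existing 2-step path
//   m->set_object(NULL);
//   om_release(self, m, true);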
  1468     // Catch if the object's header is not neutral (not locked and
  1432     // Catch if the object's header is not neutral (not locked and
  1469     // not marked is what we care about here).
  1433     // not marked is what we care about here).
  1470     assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
  1434     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  1471     ObjectMonitor * m = omAlloc(Self);
  1435     ObjectMonitor* m = om_alloc(self);
  1472     // prepare m for installation - set monitor to initial state
  1436     // prepare m for installation - set monitor to initial state
  1473     m->Recycle();
  1437     m->Recycle();
  1474     m->set_header(mark);
  1438     m->set_header(mark);
  1475     m->set_owner(NULL);
       
  1476     m->set_object(object);
  1439     m->set_object(object);
  1477     m->_recursions   = 0;
       
  1478     m->_Responsible  = NULL;
  1440     m->_Responsible  = NULL;
  1479     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
  1441     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
  1480 
  1442 
  1481     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
  1443     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
  1482       m->set_header(NULL);
  1444       m->set_header(markWord::zero());
  1483       m->set_object(NULL);
  1445       m->set_object(NULL);
  1484       m->Recycle();
  1446       m->Recycle();
  1485       omRelease(Self, m, true);
  1447       om_release(self, m, true);
  1486       m = NULL;
  1448       m = NULL;
  1487       continue;
  1449       continue;
  1488       // interference - the markword changed - just retry.
  1450       // interference - the markword changed - just retry.
  1489       // The state-transitions are one-way, so there's no chance of
  1451       // The state-transitions are one-way, so there's no chance of
  1490       // live-lock -- "Inflated" is an absorbing state.
  1452       // live-lock -- "Inflated" is an absorbing state.
  1492 
  1454 
  1493     // Hopefully the performance counters are allocated on distinct
  1455     // Hopefully the performance counters are allocated on distinct
  1494     // cache lines to avoid false sharing on MP systems ...
  1456     // cache lines to avoid false sharing on MP systems ...
  1495     OM_PERFDATA_OP(Inflations, inc());
  1457     OM_PERFDATA_OP(Inflations, inc());
  1496     if (log_is_enabled(Trace, monitorinflation)) {
  1458     if (log_is_enabled(Trace, monitorinflation)) {
  1497       ResourceMark rm(Self);
  1459       ResourceMark rm(self);
  1498       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
  1460       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
  1499                    INTPTR_FORMAT ", type='%s'", p2i(object),
  1461                    INTPTR_FORMAT ", type='%s'", p2i(object),
  1500                    p2i(object->mark()), object->klass()->external_name());
  1462                    object->mark().value(), object->klass()->external_name());
  1501     }
  1463     }
  1502     if (event.should_commit()) {
  1464     if (event.should_commit()) {
  1503       post_monitor_inflate_event(&event, object, cause);
  1465       post_monitor_inflate_event(&event, object, cause);
  1504     }
  1466     }
  1505     return m;
  1467     return m;
  1509 
  1471 
  1510 // We maintain a list of in-use monitors for each thread.
  1472 // We maintain a list of in-use monitors for each thread.
  1511 //
  1473 //
  1512 // deflate_thread_local_monitors() scans a single thread's in-use list, while
  1474 // deflate_thread_local_monitors() scans a single thread's in-use list, while
  1513 // deflate_idle_monitors() scans only a global list of in-use monitors, which
  1475 // deflate_idle_monitors() scans only a global list of in-use monitors, which
  1514 // is populated only as a thread dies (see omFlush()).
  1476 // is populated only as a thread dies (see om_flush()).
  1515 //
  1477 //
  1516 // These operations are called at all safepoints, immediately after mutators
  1478 // These operations are called at all safepoints, immediately after mutators
  1517 // are stopped, but before any objects have moved. Collectively they traverse
  1479 // are stopped, but before any objects have moved. Collectively they traverse
  1518 // the population of in-use monitors, deflating where possible. The scavenged
  1480 // the population of in-use monitors, deflating where possible. The scavenged
  1519 // monitors are returned to the global monitor free list.
  1481 // monitors are returned to the global monitor free list.
  1529 // This is an unfortunate aspect of this design.
  1491 // This is an unfortunate aspect of this design.
  1530 
  1492 
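// Editor's note: roughly how these pieces compose at a safepoint -- a
// sketch inferred from this file; the actual driver is
// SafepointSynchronize::do_cleanup_tasks() (see the reference below):
//
//   DeflateMonitorCounters counters;
//   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
//   for each JavaThread jt:  // possibly split across cleanup workers
//     ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
//   ObjectSynchronizer::deflate_idle_monitors(&counters);   // global in-use list
//   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);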
  1531 // Deflate a single monitor if not in use.
  1493 // Deflate a single monitor if not in use.
  1532 // Return true if deflated, false if in use.
  1494 // Return true if deflated, false if in use.
  1533 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1495 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1534                                          ObjectMonitor** freeHeadp,
  1496                                          ObjectMonitor** free_head_p,
  1535                                          ObjectMonitor** freeTailp) {
  1497                                          ObjectMonitor** free_tail_p) {
  1536   bool deflated;
  1498   bool deflated;
  1537   // Normal case ... The monitor is associated with obj.
  1499   // Normal case ... The monitor is associated with obj.
  1538   const markOop mark = obj->mark();
  1500   const markWord mark = obj->mark();
  1539   guarantee(mark == markOopDesc::encode(mid), "should match: mark="
  1501   guarantee(mark == markWord::encode(mid), "should match: mark="
  1540             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
  1502             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
  1541             p2i(markOopDesc::encode(mid)));
  1503             markWord::encode(mid).value());
  1542   // Make sure that mark->monitor() and markOopDesc::encode() agree:
  1504   // Make sure that mark.monitor() and markWord::encode() agree:
  1543   guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
  1505   guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
  1544             ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
  1506             ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  1545   const markOop dmw = mid->header();
  1507   const markWord dmw = mid->header();
  1546   guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
  1508   guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
  1547 
  1509 
  1548   if (mid->is_busy()) {
  1510   if (mid->is_busy()) {
  1549     deflated = false;
  1511     deflated = false;
  1550   } else {
  1512   } else {
  1551     // Deflate the monitor if it is no longer being used
  1513     // Deflate the monitor if it is no longer being used
  1554     if (log_is_enabled(Trace, monitorinflation)) {
  1516     if (log_is_enabled(Trace, monitorinflation)) {
  1555       ResourceMark rm;
  1517       ResourceMark rm;
  1556       log_trace(monitorinflation)("deflate_monitor: "
  1518       log_trace(monitorinflation)("deflate_monitor: "
  1557                                   "object=" INTPTR_FORMAT ", mark="
  1519                                   "object=" INTPTR_FORMAT ", mark="
  1558                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
  1520                                   INTPTR_FORMAT ", type='%s'", p2i(obj),
  1559                                   p2i(mark), obj->klass()->external_name());
  1521                                   mark.value(), obj->klass()->external_name());
  1560     }
  1522     }
  1561 
  1523 
  1562     // Restore the header back to obj
  1524     // Restore the header back to obj
  1563     obj->release_set_mark(dmw);
  1525     obj->release_set_mark(dmw);
  1564     mid->clear();
  1526     mid->clear();
  1565 
  1527 
  1566     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
  1528     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
  1567            p2i(mid->object()));
  1529            p2i(mid->object()));
  1568 
  1530 
  1569     // Move the object to the working free list defined by freeHeadp, freeTailp
  1531     // Move the deflated ObjectMonitor to the working free list
  1570     if (*freeHeadp == NULL) *freeHeadp = mid;
  1532     // defined by free_head_p and free_tail_p.
  1571     if (*freeTailp != NULL) {
  1533     if (*free_head_p == NULL) *free_head_p = mid;
  1572       ObjectMonitor * prevtail = *freeTailp;
  1534     if (*free_tail_p != NULL) {
  1573       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
  1535       // We append to the list so the caller can use mid->_next_om
  1574       prevtail->FreeNext = mid;
  1536       // to fix the linkages in its context.
  1575     }
  1537       ObjectMonitor* prevtail = *free_tail_p;
  1576     *freeTailp = mid;
  1538       // Should have been cleaned up by the caller:
       
  1539       assert(prevtail->_next_om == NULL, "cleaned up deflated?");
       
  1540       prevtail->_next_om = mid;
       
  1541     }
       
  1542     *free_tail_p = mid;
       
  1543     // At this point, mid->_next_om still refers to its current
       
  1544     // value and another ObjectMonitor's _next_om field still
       
  1545     // refers to this ObjectMonitor. Those linkages have to be
       
  1546     // cleaned up by the caller who has the complete context.
  1577     deflated = true;
  1547     deflated = true;
  1578   }
  1548   }
  1579   return deflated;
  1549   return deflated;
  1580 }
  1550 }
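// Editor's note: caller-side contract for deflate_monitor(), sketched
// with names from this file. The deflated monitor becomes the tail of
// a caller-local list, and the caller must finish the unlinking:
//
//   ObjectMonitor* free_head_p = NULL;
//   ObjectMonitor* free_tail_p = NULL;
//   if (deflate_monitor(mid, obj, &free_head_p, &free_tail_p)) {
//     // mid is now *free_tail_p, but mid->_next_om and the in-use
//     // list that contained mid still need fixing -- exactly what
//     // deflate_monitor_list() below does.
//   }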
  1581 
  1551 
  1590 // process the same monitor lists concurrently.
  1560 // process the same monitor lists concurrently.
  1591 //
  1561 //
  1592 // See also ParallelSPCleanupTask and
  1562 // See also ParallelSPCleanupTask and
  1593 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
  1563 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
  1594 // Threads::parallel_java_threads_do() in thread.cpp.
  1564 // Threads::parallel_java_threads_do() in thread.cpp.
  1595 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
  1565 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
  1596                                              ObjectMonitor** freeHeadp,
  1566                                              ObjectMonitor** free_head_p,
  1597                                              ObjectMonitor** freeTailp) {
  1567                                              ObjectMonitor** free_tail_p) {
  1598   ObjectMonitor* mid;
  1568   ObjectMonitor* mid;
  1599   ObjectMonitor* next;
  1569   ObjectMonitor* next;
  1600   ObjectMonitor* cur_mid_in_use = NULL;
  1570   ObjectMonitor* cur_mid_in_use = NULL;
  1601   int deflated_count = 0;
  1571   int deflated_count = 0;
  1602 
  1572 
  1603   for (mid = *listHeadp; mid != NULL;) {
  1573   for (mid = *list_p; mid != NULL;) {
  1604     oop obj = (oop) mid->object();
  1574     oop obj = (oop) mid->object();
  1605     if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
  1575     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
  1606       // if deflate_monitor succeeded,
  1576       // Deflation succeeded and already updated free_head_p and
  1607       // extract from per-thread in-use list
  1577       // free_tail_p as needed. Finish the move to the local free list
  1608       if (mid == *listHeadp) {
  1578       // by unlinking mid from the global or per-thread in-use list.
  1609         *listHeadp = mid->FreeNext;
  1579       if (mid == *list_p) {
       
  1580         *list_p = mid->_next_om;
  1610       } else if (cur_mid_in_use != NULL) {
  1581       } else if (cur_mid_in_use != NULL) {
  1611         cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
  1582         cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
  1612       }
  1583       }
  1613       next = mid->FreeNext;
  1584       next = mid->_next_om;
  1614       mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
  1585       mid->_next_om = NULL;  // This mid is current tail in the free_head_p list
  1615       mid = next;
  1586       mid = next;
  1616       deflated_count++;
  1587       deflated_count++;
  1617     } else {
  1588     } else {
  1618       cur_mid_in_use = mid;
  1589       cur_mid_in_use = mid;
  1619       mid = mid->FreeNext;
  1590       mid = mid->_next_om;
  1620     }
  1591     }
  1621   }
  1592   }
  1622   return deflated_count;
  1593   return deflated_count;
  1623 }
  1594 }
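// Editor's note: both call sites follow the same pattern -- pass a
// local free_head_p/free_tail_p pair, then splice the scavenged
// segment onto g_free_list under gListLock. See deflate_idle_monitors()
// (global g_om_in_use_list) and deflate_thread_local_monitors()
// (per-thread om_in_use_list) below.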
  1624 
  1595 
  1625 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1596 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1626   counters->nInuse = 0;              // currently associated with objects
  1597   counters->n_in_use = 0;              // currently associated with objects
  1627   counters->nInCirculation = 0;      // extant
  1598   counters->n_in_circulation = 0;      // extant
  1628   counters->nScavenged = 0;          // reclaimed (global and per-thread)
  1599   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  1629   counters->perThreadScavenged = 0;  // per-thread scavenge total
  1600   counters->per_thread_scavenged = 0;  // per-thread scavenge total
  1630   counters->perThreadTimes = 0.0;    // per-thread scavenge times
  1601   counters->per_thread_times = 0.0;    // per-thread scavenge times
  1631 }
  1602 }
  1632 
  1603 
  1633 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1604 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1634   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1605   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1635   bool deflated = false;
  1606   bool deflated = false;
  1636 
  1607 
  1637   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  1608   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  1638   ObjectMonitor * freeTailp = NULL;
  1609   ObjectMonitor* free_tail_p = NULL;
  1639   elapsedTimer timer;
  1610   elapsedTimer timer;
  1640 
  1611 
  1641   if (log_is_enabled(Info, monitorinflation)) {
  1612   if (log_is_enabled(Info, monitorinflation)) {
  1642     timer.start();
  1613     timer.start();
  1643   }
  1614   }
  1644 
  1615 
  1645   // Prevent omFlush from changing mids in Thread dtors during deflation,
  1616   // Prevent om_flush from changing mids in Thread dtors during deflation,
  1646   // and in case the VM thread is acquiring a lock during a safepoint.
  1617   // and in case the VM thread is acquiring a lock during a safepoint.
  1647   // See e.g. 6320749.
  1618   // See e.g. 6320749.
  1648   Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
  1619   Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
  1649 
  1620 
  1650   // Note: the thread-local monitor lists get deflated in
  1621   // Note: the thread-local monitor lists get deflated in
  1651   // a separate pass. See deflate_thread_local_monitors().
  1622   // a separate pass. See deflate_thread_local_monitors().
  1652 
  1623 
  1653   // For moribund threads, scan gOmInUseList
  1624   // For moribund threads, scan g_om_in_use_list
  1654   int deflated_count = 0;
  1625   int deflated_count = 0;
  1655   if (gOmInUseList) {
  1626   if (g_om_in_use_list) {
  1656     counters->nInCirculation += gOmInUseCount;
  1627     counters->n_in_circulation += g_om_in_use_count;
  1657     deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
  1628     deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
  1658     gOmInUseCount -= deflated_count;
  1629     g_om_in_use_count -= deflated_count;
  1659     counters->nScavenged += deflated_count;
  1630     counters->n_scavenged += deflated_count;
  1660     counters->nInuse += gOmInUseCount;
  1631     counters->n_in_use += g_om_in_use_count;
  1661   }
  1632   }
  1662 
  1633 
  1663   // Move the scavenged monitors back to the global free list.
  1634   if (free_head_p != NULL) {
  1664   if (freeHeadp != NULL) {
  1635     // Move the deflated ObjectMonitors back to the global free list.
  1665     guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
  1636     guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
  1666     assert(freeTailp->FreeNext == NULL, "invariant");
  1637     assert(free_tail_p->_next_om == NULL, "invariant");
  1667     // constant-time list splice - prepend scavenged segment to gFreeList
  1638     // constant-time list splice - prepend scavenged segment to g_free_list
  1668     freeTailp->FreeNext = gFreeList;
  1639     free_tail_p->_next_om = g_free_list;
  1669     gFreeList = freeHeadp;
  1640     g_free_list = free_head_p;
  1670   }
  1641   }
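  // Editor's note: the splice above is O(1) regardless of how many
  // monitors were scavenged -- only the tail's next pointer and the
  // list head are written:
  //
  //   before: free_head_p -> ... -> free_tail_p -> NULL
  //           g_free_list -> F1 -> F2 -> ...
  //   after:  g_free_list -> free_head_p -> ... -> free_tail_p -> F1 -> F2 -> ...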
  1671   Thread::muxRelease(&gListLock);
  1642   Thread::muxRelease(&gListLock);
  1672   timer.stop();
  1643   timer.stop();
  1673 
  1644 
  1674   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1645   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1675   LogStreamHandle(Info, monitorinflation) lsh_info;
  1646   LogStreamHandle(Info, monitorinflation) lsh_info;
  1676   LogStream * ls = NULL;
  1647   LogStream* ls = NULL;
  1677   if (log_is_enabled(Debug, monitorinflation)) {
  1648   if (log_is_enabled(Debug, monitorinflation)) {
  1678     ls = &lsh_debug;
  1649     ls = &lsh_debug;
  1679   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1650   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1680     ls = &lsh_info;
  1651     ls = &lsh_info;
  1681   }
  1652   }
  1687 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1658 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1688   // Report the cumulative time for deflating each thread's idle
  1659   // Report the cumulative time for deflating each thread's idle
  1689   // monitors. Note: if the work is split among more than one
  1660   // monitors. Note: if the work is split among more than one
  1690   // worker thread, then the reported time will likely be more
  1661   // worker thread, then the reported time will likely be more
  1691   // than a beginning-to-end measurement of the phase.
  1662   // than a beginning-to-end measurement of the phase.
  1692   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);
  1663   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
  1693 
  1664 
  1694   gMonitorFreeCount += counters->nScavenged;
  1665   g_om_free_count += counters->n_scavenged;
  1695 
  1666 
  1696   if (log_is_enabled(Debug, monitorinflation)) {
  1667   if (log_is_enabled(Debug, monitorinflation)) {
  1697     // exit_globals()'s call to audit_and_print_stats() is done
  1668     // exit_globals()'s call to audit_and_print_stats() is done
  1698     // at the Info level.
  1669     // at the Info level.
  1699     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  1670     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  1700   } else if (log_is_enabled(Info, monitorinflation)) {
  1671   } else if (log_is_enabled(Info, monitorinflation)) {
  1701     Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
  1672     Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
  1702     log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
  1673     log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
  1703                                "gMonitorFreeCount=%d", gMonitorPopulation,
  1674                                "g_om_free_count=%d", g_om_population,
  1704                                gOmInUseCount, gMonitorFreeCount);
  1675                                g_om_in_use_count, g_om_free_count);
  1705     Thread::muxRelease(&gListLock);
  1676     Thread::muxRelease(&gListLock);
  1706   }
  1677   }
  1707 
  1678 
  1708   ForceMonitorScavenge = 0;    // Reset
  1679   ForceMonitorScavenge = 0;    // Reset
  1709 
  1680 
  1710   OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  1681   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  1711   OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  1682   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
  1712 
  1683 
  1713   GVars.stwRandom = os::random();
  1684   GVars.stw_random = os::random();
  1714   GVars.stwCycle++;
  1685   GVars.stw_cycle++;
  1715 }
  1686 }
  1716 
  1687 
  1717 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  1688 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  1718   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1689   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1719 
  1690 
  1720   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  1691   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  1721   ObjectMonitor * freeTailp = NULL;
  1692   ObjectMonitor* free_tail_p = NULL;
  1722   elapsedTimer timer;
  1693   elapsedTimer timer;
  1723 
  1694 
  1724   if (log_is_enabled(Info, safepoint, cleanup) ||
  1695   if (log_is_enabled(Info, safepoint, cleanup) ||
  1725       log_is_enabled(Info, monitorinflation)) {
  1696       log_is_enabled(Info, monitorinflation)) {
  1726     timer.start();
  1697     timer.start();
  1727   }
  1698   }
  1728 
  1699 
  1729   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
  1700   int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);
  1730 
  1701 
  1731   Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
  1702   Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
  1732 
  1703 
  1733   // Adjust counters
  1704   // Adjust counters
  1734   counters->nInCirculation += thread->omInUseCount;
  1705   counters->n_in_circulation += thread->om_in_use_count;
  1735   thread->omInUseCount -= deflated_count;
  1706   thread->om_in_use_count -= deflated_count;
  1736   counters->nScavenged += deflated_count;
  1707   counters->n_scavenged += deflated_count;
  1737   counters->nInuse += thread->omInUseCount;
  1708   counters->n_in_use += thread->om_in_use_count;
  1738   counters->perThreadScavenged += deflated_count;
  1709   counters->per_thread_scavenged += deflated_count;
  1739 
  1710 
  1740   // Move the scavenged monitors back to the global free list.
  1711   if (free_head_p != NULL) {
  1741   if (freeHeadp != NULL) {
  1712     // Move the deflated ObjectMonitors back to the global free list.
  1742     guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
  1713     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
  1743     assert(freeTailp->FreeNext == NULL, "invariant");
  1714     assert(free_tail_p->_next_om == NULL, "invariant");
  1744 
  1715 
  1745     // constant-time list splice - prepend scavenged segment to gFreeList
  1716     // constant-time list splice - prepend scavenged segment to g_free_list
  1746     freeTailp->FreeNext = gFreeList;
  1717     free_tail_p->_next_om = g_free_list;
  1747     gFreeList = freeHeadp;
  1718     g_free_list = free_head_p;
  1748   }
  1719   }
  1749 
  1720 
  1750   timer.stop();
  1721   timer.stop();
  1751   // Safepoint logging cares about cumulative perThreadTimes and
  1722   // Safepoint logging cares about cumulative per_thread_times and
  1752   // we'll capture most of the cost, but not the muxRelease(), which
  1723   // we'll capture most of the cost, but not the muxRelease(), which
  1753   // should be cheap.
  1724   // should be cheap.
  1754   counters->perThreadTimes += timer.seconds();
  1725   counters->per_thread_times += timer.seconds();
  1755 
  1726 
  1756   Thread::muxRelease(&gListLock);
  1727   Thread::muxRelease(&gListLock);
  1757 
  1728 
  1758   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1729   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1759   LogStreamHandle(Info, monitorinflation) lsh_info;
  1730   LogStreamHandle(Info, monitorinflation) lsh_info;
  1760   LogStream * ls = NULL;
  1731   LogStream* ls = NULL;
  1761   if (log_is_enabled(Debug, monitorinflation)) {
  1732   if (log_is_enabled(Debug, monitorinflation)) {
  1762     ls = &lsh_debug;
  1733     ls = &lsh_debug;
  1763   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1734   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1764     ls = &lsh_info;
  1735     ls = &lsh_info;
  1765   }
  1736   }
  1831 
  1802 
  1832 u_char* ObjectSynchronizer::get_gvars_addr() {
  1803 u_char* ObjectSynchronizer::get_gvars_addr() {
  1833   return (u_char*)&GVars;
  1804   return (u_char*)&GVars;
  1834 }
  1805 }
  1835 
  1806 
  1836 u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  1807 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  1837   return (u_char*)&GVars.hcSequence;
  1808   return (u_char*)&GVars.hc_sequence;
  1838 }
  1809 }
  1839 
  1810 
  1840 size_t ObjectSynchronizer::get_gvars_size() {
  1811 size_t ObjectSynchronizer::get_gvars_size() {
  1841   return sizeof(SharedGlobals);
  1812   return sizeof(SharedGlobals);
  1842 }
  1813 }
  1843 
  1814 
  1844 u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  1815 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  1845   return (u_char*)&GVars.stwRandom;
  1816   return (u_char*)&GVars.stw_random;
  1846 }
  1817 }
  1847 
  1818 
  1848 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  1819 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  1849   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
  1820   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
  1850 
  1821 
  1851   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1822   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1852   LogStreamHandle(Info, monitorinflation) lsh_info;
  1823   LogStreamHandle(Info, monitorinflation) lsh_info;
  1853   LogStreamHandle(Trace, monitorinflation) lsh_trace;
  1824   LogStreamHandle(Trace, monitorinflation) lsh_trace;
  1854   LogStream * ls = NULL;
  1825   LogStream* ls = NULL;
  1855   if (log_is_enabled(Trace, monitorinflation)) {
  1826   if (log_is_enabled(Trace, monitorinflation)) {
  1856     ls = &lsh_trace;
  1827     ls = &lsh_trace;
  1857   } else if (log_is_enabled(Debug, monitorinflation)) {
  1828   } else if (log_is_enabled(Debug, monitorinflation)) {
  1858     ls = &lsh_debug;
  1829     ls = &lsh_debug;
  1859   } else if (log_is_enabled(Info, monitorinflation)) {
  1830   } else if (log_is_enabled(Info, monitorinflation)) {
  1865     // Not at VM exit so grab the global list lock.
  1836     // Not at VM exit so grab the global list lock.
  1866     Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  1837     Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  1867   }
  1838   }
  1868 
  1839 
  1869   // Log counts for the global and per-thread monitor lists:
  1840   // Log counts for the global and per-thread monitor lists:
  1870   int chkMonitorPopulation = log_monitor_list_counts(ls);
  1841   int chk_om_population = log_monitor_list_counts(ls);
  1871   int error_cnt = 0;
  1842   int error_cnt = 0;
  1872 
  1843 
  1873   ls->print_cr("Checking global lists:");
  1844   ls->print_cr("Checking global lists:");
  1874 
  1845 
  1875   // Check gMonitorPopulation:
  1846   // Check g_om_population:
  1876   if (gMonitorPopulation == chkMonitorPopulation) {
  1847   if (g_om_population == chk_om_population) {
  1877     ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
  1848     ls->print_cr("g_om_population=%d equals chk_om_population=%d",
  1878                  gMonitorPopulation, chkMonitorPopulation);
  1849                  g_om_population, chk_om_population);
  1879   } else {
  1850   } else {
  1880     ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
  1851     ls->print_cr("ERROR: g_om_population=%d is not equal to "
  1881                  "chkMonitorPopulation=%d", gMonitorPopulation,
  1852                  "chk_om_population=%d", g_om_population,
  1882                  chkMonitorPopulation);
  1853                  chk_om_population);
  1883     error_cnt++;
  1854     error_cnt++;
  1884   }
  1855   }
  1885 
  1856 
  1886   // Check gOmInUseList and gOmInUseCount:
  1857   // Check g_om_in_use_list and g_om_in_use_count:
  1887   chk_global_in_use_list_and_count(ls, &error_cnt);
  1858   chk_global_in_use_list_and_count(ls, &error_cnt);
  1888 
  1859 
  1889   // Check gFreeList and gMonitorFreeCount:
  1860   // Check g_free_list and g_om_free_count:
  1890   chk_global_free_list_and_count(ls, &error_cnt);
  1861   chk_global_free_list_and_count(ls, &error_cnt);
  1891 
  1862 
  1892   if (!on_exit) {
  1863   if (!on_exit) {
  1893     Thread::muxRelease(&gListLock);
  1864     Thread::muxRelease(&gListLock);
  1894   }
  1865   }
  1895 
  1866 
  1896   ls->print_cr("Checking per-thread lists:");
  1867   ls->print_cr("Checking per-thread lists:");
  1897 
  1868 
  1898   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  1869   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  1899     // Check omInUseList and omInUseCount:
  1870     // Check om_in_use_list and om_in_use_count:
  1900     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
  1871     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
  1901 
  1872 
  1902     // Check omFreeList and omFreeCount:
  1873     // Check om_free_list and om_free_count:
  1903     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  1874     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  1904   }
  1875   }
  1905 
  1876 
  1906   if (error_cnt == 0) {
  1877   if (error_cnt == 0) {
  1907     ls->print_cr("No errors found in monitor list checks.");
  1878     ls->print_cr("No errors found in monitor list checks.");
  1921 
  1892 
  1922   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
  1893   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
  1923 }
  1894 }
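// Editor's note: when this audit runs is driven by the unified logging
// level for the 'monitorinflation' tag; assuming the usual -Xlog
// syntax, something like:
//
//   java -Xlog:monitorinflation=debug ...   // audit at each deflation cycle
//   java -Xlog:monitorinflation=info ...    // audit once at VM exit
//
// which matches the log_is_enabled(Debug/Info, monitorinflation)
// checks in finish_deflate_idle_monitors() above.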
  1924 
  1895 
  1925 // Check a free monitor entry; log any errors.
  1896 // Check a free monitor entry; log any errors.
  1926 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
  1897 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
  1927                                         outputStream * out, int *error_cnt_p) {
  1898                                         outputStream * out, int *error_cnt_p) {
       
  1899   stringStream ss;
  1928   if (n->is_busy()) {
  1900   if (n->is_busy()) {
  1929     if (jt != NULL) {
  1901     if (jt != NULL) {
  1930       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1902       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1931                     ": free per-thread monitor must not be busy.", p2i(jt),
  1903                     ": free per-thread monitor must not be busy: %s", p2i(jt),
  1932                     p2i(n));
  1904                     p2i(n), n->is_busy_to_string(&ss));
  1933     } else {
  1905     } else {
  1934       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
  1906       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
  1935                     "must not be busy.", p2i(n));
  1907                     "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
  1936     }
  1908     }
  1937     *error_cnt_p = *error_cnt_p + 1;
  1909     *error_cnt_p = *error_cnt_p + 1;
  1938   }
  1910   }
  1939   if (n->header() != NULL) {
  1911   if (n->header().value() != 0) {
  1940     if (jt != NULL) {
  1912     if (jt != NULL) {
  1941       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1913       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1942                     ": free per-thread monitor must have NULL _header "
  1914                     ": free per-thread monitor must have NULL _header "
  1943                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
  1915                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
  1944                     p2i(n->header()));
  1916                     n->header().value());
  1945     } else {
  1917     } else {
  1946       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
  1918       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
  1947                     "must have NULL _header field: _header=" INTPTR_FORMAT,
  1919                     "must have NULL _header field: _header=" INTPTR_FORMAT,
  1948                     p2i(n), p2i(n->header()));
  1920                     p2i(n), n->header().value());
  1949     }
  1921     }
  1950     *error_cnt_p = *error_cnt_p + 1;
  1922     *error_cnt_p = *error_cnt_p + 1;
  1951   }
  1923   }
  1952   if (n->object() != NULL) {
  1924   if (n->object() != NULL) {
  1953     if (jt != NULL) {
  1925     if (jt != NULL) {
  1965 }
  1937 }
  1966 
  1938 
  1967 // Check the global free list and count; log the results of the checks.
  1939 // Check the global free list and count; log the results of the checks.
  1968 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
  1940 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
  1969                                                         int *error_cnt_p) {
  1941                                                         int *error_cnt_p) {
  1970   int chkMonitorFreeCount = 0;
  1942   int chk_om_free_count = 0;
  1971   for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) {
  1943   for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
  1972     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
  1944     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
  1973     chkMonitorFreeCount++;
  1945     chk_om_free_count++;
  1974   }
  1946   }
  1975   if (gMonitorFreeCount == chkMonitorFreeCount) {
  1947   if (g_om_free_count == chk_om_free_count) {
  1976     out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
  1948     out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
  1977                   gMonitorFreeCount, chkMonitorFreeCount);
  1949                   g_om_free_count, chk_om_free_count);
  1978   } else {
  1950   } else {
  1979     out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
  1951     out->print_cr("ERROR: g_om_free_count=%d is not equal to "
  1980                   "chkMonitorFreeCount=%d", gMonitorFreeCount,
  1952                   "chk_om_free_count=%d", g_om_free_count,
  1981                   chkMonitorFreeCount);
  1953                   chk_om_free_count);
  1982     *error_cnt_p = *error_cnt_p + 1;
  1954     *error_cnt_p = *error_cnt_p + 1;
  1983   }
  1955   }
  1984 }
  1956 }
  1985 
  1957 
  1986 // Check the global in-use list and count; log the results of the checks.
  1958 // Check the global in-use list and count; log the results of the checks.
  1987 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
  1959 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
  1988                                                           int *error_cnt_p) {
  1960                                                           int *error_cnt_p) {
  1989   int chkOmInUseCount = 0;
  1961   int chk_om_in_use_count = 0;
  1990   for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
  1962   for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
  1991     chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
  1963     chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
  1992     chkOmInUseCount++;
  1964     chk_om_in_use_count++;
  1993   }
  1965   }
  1994   if (gOmInUseCount == chkOmInUseCount) {
  1966   if (g_om_in_use_count == chk_om_in_use_count) {
  1995     out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
  1967     out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
  1996                   chkOmInUseCount);
  1968                   chk_om_in_use_count);
  1997   } else {
  1969   } else {
  1998     out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
  1970     out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
  1999                   gOmInUseCount, chkOmInUseCount);
  1971                   g_om_in_use_count, chk_om_in_use_count);
  2000     *error_cnt_p = *error_cnt_p + 1;
  1972     *error_cnt_p = *error_cnt_p + 1;
  2001   }
  1973   }
  2002 }
  1974 }
  2003 
  1975 
  2004 // Check an in-use monitor entry; log any errors.
  1976 // Check an in-use monitor entry; log any errors.
  2005 void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
  1977 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
  2006                                           outputStream * out, int *error_cnt_p) {
  1978                                           outputStream * out, int *error_cnt_p) {
  2007   if (n->header() == NULL) {
  1979   if (n->header().value() == 0) {
  2008     if (jt != NULL) {
  1980     if (jt != NULL) {
  2009       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1981       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2010                     ": in-use per-thread monitor must have non-NULL _header "
  1982                     ": in-use per-thread monitor must have non-NULL _header "
  2011                     "field.", p2i(jt), p2i(n));
  1983                     "field.", p2i(jt), p2i(n));
  2012     } else {
  1984     } else {
  2025                     "must have non-NULL _object field.", p2i(n));
  1997                     "must have non-NULL _object field.", p2i(n));
  2026     }
  1998     }
  2027     *error_cnt_p = *error_cnt_p + 1;
  1999     *error_cnt_p = *error_cnt_p + 1;
  2028   }
  2000   }
  2029   const oop obj = (oop)n->object();
  2001   const oop obj = (oop)n->object();
  2030   const markOop mark = obj->mark();
  2002   const markWord mark = obj->mark();
  2031   if (!mark->has_monitor()) {
  2003   if (!mark.has_monitor()) {
  2032     if (jt != NULL) {
  2004     if (jt != NULL) {
  2033       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2005       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2034                     ": in-use per-thread monitor's object does not think "
  2006                     ": in-use per-thread monitor's object does not think "
  2035                     "it has a monitor: obj=" INTPTR_FORMAT ", mark="
  2007                     "it has a monitor: obj=" INTPTR_FORMAT ", mark="
  2036                     INTPTR_FORMAT,  p2i(jt), p2i(n), p2i(obj), p2i(mark));
  2008                     INTPTR_FORMAT,  p2i(jt), p2i(n), p2i(obj), mark.value());
  2037     } else {
  2009     } else {
  2038       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
  2010       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
  2039                     "monitor's object does not think it has a monitor: obj="
  2011                     "monitor's object does not think it has a monitor: obj="
  2040                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
  2012                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
  2041                     p2i(obj), p2i(mark));
  2013                     p2i(obj), mark.value());
  2042     }
  2014     }
  2043     *error_cnt_p = *error_cnt_p + 1;
  2015     *error_cnt_p = *error_cnt_p + 1;
  2044   }
  2016   }
  2045   ObjectMonitor * const obj_mon = mark->monitor();
  2017   ObjectMonitor* const obj_mon = mark.monitor();
  2046   if (n != obj_mon) {
  2018   if (n != obj_mon) {
  2047     if (jt != NULL) {
  2019     if (jt != NULL) {
  2048       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2020       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2049                     ": in-use per-thread monitor's object does not refer "
  2021                     ": in-use per-thread monitor's object does not refer "
  2050                     "to the same monitor: obj=" INTPTR_FORMAT ", mark="
  2022                     "to the same monitor: obj=" INTPTR_FORMAT ", mark="
  2051                     INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
  2023                     INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
  2052                     p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
  2024                     p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
  2053     } else {
  2025     } else {
  2054       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
  2026       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
  2055                     "monitor's object does not refer to the same monitor: obj="
  2027                     "monitor's object does not refer to the same monitor: obj="
  2056                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
  2028                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
  2057                     INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
  2029                     INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
  2058     }
  2030     }
  2059     *error_cnt_p = *error_cnt_p + 1;
  2031     *error_cnt_p = *error_cnt_p + 1;
  2060   }
  2032   }
  2061 }
  2033 }
  2062 
  2034 
  2063 // Check the thread's free list and count; log the results of the checks.
  2035 // Check the thread's free list and count; log the results of the checks.
  2064 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
  2036 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
  2065                                                             outputStream * out,
  2037                                                             outputStream * out,
  2066                                                             int *error_cnt_p) {
  2038                                                             int *error_cnt_p) {
  2067   int chkOmFreeCount = 0;
  2039   int chk_om_free_count = 0;
  2068   for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) {
  2040   for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
  2069     chk_free_entry(jt, n, out, error_cnt_p);
  2041     chk_free_entry(jt, n, out, error_cnt_p);
  2070     chkOmFreeCount++;
  2042     chk_om_free_count++;
  2071   }
  2043   }
  2072   if (jt->omFreeCount == chkOmFreeCount) {
  2044   if (jt->om_free_count == chk_om_free_count) {
  2073     out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
  2045     out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
  2074                   "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  2046                   "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
  2075   } else {
  2047   } else {
  2076     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
  2048     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
  2077                   "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
  2049                   "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
  2078                   chkOmFreeCount);
  2050                   chk_om_free_count);
  2079     *error_cnt_p = *error_cnt_p + 1;
  2051     *error_cnt_p = *error_cnt_p + 1;
  2080   }
  2052   }
  2081 }
  2053 }
  2082 
  2054 
  2083 // Check the thread's in-use list and count; log the results of the checks.
  2055 // Check the thread's in-use list and count; log the results of the checks.
  2084 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
  2056 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
  2085                                                               outputStream * out,
  2057                                                               outputStream * out,
  2086                                                               int *error_cnt_p) {
  2058                                                               int *error_cnt_p) {
  2087   int chkOmInUseCount = 0;
  2059   int chk_om_in_use_count = 0;
  2088   for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
  2060   for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
  2089     chk_in_use_entry(jt, n, out, error_cnt_p);
  2061     chk_in_use_entry(jt, n, out, error_cnt_p);
  2090     chkOmInUseCount++;
  2062     chk_om_in_use_count++;
  2091   }
  2063   }
  2092   if (jt->omInUseCount == chkOmInUseCount) {
  2064   if (jt->om_in_use_count == chk_om_in_use_count) {
  2093     out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
  2065     out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
  2094                   "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
  2066                   "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
  2095                   chkOmInUseCount);
  2067                   chk_om_in_use_count);
  2096   } else {
  2068   } else {
  2097     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
  2069     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
  2098                   "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
  2070                   "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
  2099                   chkOmInUseCount);
  2071                   chk_om_in_use_count);
  2100     *error_cnt_p = *error_cnt_p + 1;
  2072     *error_cnt_p = *error_cnt_p + 1;
  2101   }
  2073   }
  2102 }
  2074 }
  2103 
  2075 
  2104 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  2076 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  2109   if (!on_exit) {
  2081   if (!on_exit) {
  2110     // Not at VM exit so grab the global list lock.
  2082     // Not at VM exit so grab the global list lock.
  2111     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  2083     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  2112   }
  2084   }
  2113 
  2085 
  2114   if (gOmInUseCount > 0) {
  2086   stringStream ss;
       
  2087   if (g_om_in_use_count > 0) {
  2115     out->print_cr("In-use global monitor info:");
  2088     out->print_cr("In-use global monitor info:");
  2116     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2089     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2117     out->print_cr("%18s  %s  %18s  %18s",
  2090     out->print_cr("%18s  %s  %18s  %18s",
  2118                   "monitor", "BHL", "object", "object type");
  2091                   "monitor", "BHL", "object", "object type");
  2119     out->print_cr("==================  ===  ==================  ==================");
  2092     out->print_cr("==================  ===  ==================  ==================");
  2120     for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
  2093     for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
  2121       const oop obj = (oop) n->object();
  2094       const oop obj = (oop) n->object();
  2122       const markOop mark = n->header();
  2095       const markWord mark = n->header();
  2123       ResourceMark rm;
  2096       ResourceMark rm;
  2124       out->print_cr(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
  2097       out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
  2125                     n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
  2098                  n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
  2126                     p2i(obj), obj->klass()->external_name());
  2099                  p2i(obj), obj->klass()->external_name());
       
  2100       if (n->is_busy() != 0) {
       
  2101         out->print(" (%s)", n->is_busy_to_string(&ss));
       
  2102         ss.reset();
       
  2103       }
       
  2104       out->cr();
  2127     }
  2105     }
  2128   }
  2106   }
  2129 
  2107 
  2130   if (!on_exit) {
  2108   if (!on_exit) {
  2131     Thread::muxRelease(&gListLock);
  2109     Thread::muxRelease(&gListLock);
  2135   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2113   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2136   out->print_cr("%18s  %18s  %s  %18s  %18s",
  2114   out->print_cr("%18s  %18s  %s  %18s  %18s",
  2137                 "jt", "monitor", "BHL", "object", "object type");
  2115                 "jt", "monitor", "BHL", "object", "object type");
  2138   out->print_cr("==================  ==================  ===  ==================  ==================");
  2116   out->print_cr("==================  ==================  ===  ==================  ==================");
  2139   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2117   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2140     for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
  2118     for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
  2141       const oop obj = (oop) n->object();
  2119       const oop obj = (oop) n->object();
  2142       const markOop mark = n->header();
  2120       const markWord mark = n->header();
  2143       ResourceMark rm;
  2121       ResourceMark rm;
  2144       out->print_cr(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
  2122       out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
  2145                     "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
  2123                  "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
  2146                     mark->hash() != 0, n->owner() != NULL, p2i(obj),
  2124                  mark.hash() != 0, n->owner() != NULL, p2i(obj),
  2147                     obj->klass()->external_name());
  2125                  obj->klass()->external_name());
       
  2126       if (n->is_busy() != 0) {
       
  2127         out->print(" (%s)", n->is_busy_to_string(&ss));
       
  2128         ss.reset();
       
  2129       }
       
  2130       out->cr();
  2148     }
  2131     }
  2149   }
  2132   }
  2150 
  2133 
  2151   out->flush();
  2134   out->flush();
  2152 }
  2135 }
  2153 
  2136 
  2154 // Log counts for the global and per-thread monitor lists and return
  2137 // Log counts for the global and per-thread monitor lists and return
  2155 // the population count.
  2138 // the population count.
  2156 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  2139 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  2157   int popCount = 0;
  2140   int pop_count = 0;
  2158   out->print_cr("%18s  %10s  %10s  %10s",
  2141   out->print_cr("%18s  %10s  %10s  %10s",
  2159                 "Global Lists:", "InUse", "Free", "Total");
  2142                 "Global Lists:", "InUse", "Free", "Total");
  2160   out->print_cr("==================  ==========  ==========  ==========");
  2143   out->print_cr("==================  ==========  ==========  ==========");
  2161   out->print_cr("%18s  %10d  %10d  %10d", "",
  2144   out->print_cr("%18s  %10d  %10d  %10d", "",
  2162                 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  2145                 g_om_in_use_count, g_om_free_count, g_om_population);
  2163   popCount += gOmInUseCount + gMonitorFreeCount;
  2146   pop_count += g_om_in_use_count + g_om_free_count;
  2164 
  2147 
  2165   out->print_cr("%18s  %10s  %10s  %10s",
  2148   out->print_cr("%18s  %10s  %10s  %10s",
  2166                 "Per-Thread Lists:", "InUse", "Free", "Provision");
  2149                 "Per-Thread Lists:", "InUse", "Free", "Provision");
  2167   out->print_cr("==================  ==========  ==========  ==========");
  2150   out->print_cr("==================  ==========  ==========  ==========");
  2168 
  2151 
  2169   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2152   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2170     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
  2153     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
  2171                   jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
  2154                   jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
  2172     popCount += jt->omInUseCount + jt->omFreeCount;
  2155     pop_count += jt->om_in_use_count + jt->om_free_count;
  2173   }
  2156   }
  2174   return popCount;
  2157   return pop_count;
  2175 }
  2158 }
  2176 
  2159 
  2177 #ifndef PRODUCT
  2160 #ifndef PRODUCT
  2178 
  2161 
  2179 // Check if monitor belongs to the monitor cache.
  2162 // Check if monitor belongs to the monitor cache.
  2180 // The list is grow-only so it's *relatively* safe to traverse
  2163 // The list is grow-only so it's *relatively* safe to traverse
  2181 // the list of extant blocks without taking a lock.
  2164 // the list of extant blocks without taking a lock.
  2182 
  2165 
  2183 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  2166 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  2184   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  2167   PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
  2185   while (block != NULL) {
  2168   while (block != NULL) {
  2186     assert(block->object() == CHAINMARKER, "must be a block header");
  2169     assert(block->object() == CHAINMARKER, "must be a block header");
  2187     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
  2170     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
  2188       address mon = (address)monitor;
  2171       address mon = (address)monitor;
  2189       address blk = (address)block;
  2172       address blk = (address)block;
  2190       size_t diff = mon - blk;
  2173       size_t diff = mon - blk;
  2191       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
  2174       assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
  2192       return 1;
  2175       return 1;
  2193     }
  2176     }
  2194     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  2177     block = (PaddedObjectMonitor*)block->_next_om;
  2195   }
  2178   }
  2196   return 0;
  2179   return 0;
  2197 }
  2180 }
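// Editor's note: the strict 'monitor > &block[0]' above deliberately
// excludes block[0] itself -- that slot is the block's list-linkage
// header, whose object field holds CHAINMARKER rather than a real
// inflated object (see the assert at the top of the loop).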
  2198 
  2181 
  2199 #endif
  2182 #endif