src/hotspot/share/runtime/synchronizer.cpp
changeset 57906 e17f768b3b71
parent 57893 49fea19f0726
child 58083 9046db64ca39
@@ -115 +115 @@
 
 #define NINFLATIONLOCKS 256
 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 
 // global list of blocks of monitors
-PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
-// global monitor free list
-ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
-// global monitor in-use list, for moribund threads,
-// monitors they inflated need to be scanned for deflation
-ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
-// count of entries in gOmInUseList
-int ObjectSynchronizer::gOmInUseCount = 0;
+PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
+// Global ObjectMonitor free list. Newly allocated and deflated
+// ObjectMonitors are prepended here.
+ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
+// Global ObjectMonitor in-use list. When a JavaThread is exiting,
+// ObjectMonitors on its per-thread in-use list are prepended here.
+ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
+int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list
 
-static volatile intptr_t gListLock = 0;      // protects global monitor lists
-static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
-static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
+static volatile intptr_t gListLock = 0;   // protects global monitor lists
+static volatile int g_om_free_count = 0;  // # on g_free_list
+static volatile int g_om_population = 0;  // # Extant -- in circulation
 
 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
 
 // =====================> Quick functions
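
Aside for readers new to these lists: both global lists are LIFO stacks guarded by gListLock. A minimal stand-alone sketch of the prepend discipline the new comments describe (invented names; not part of this changeset):

  #include <mutex>

  struct Monitor { Monitor* next = nullptr; };

  static Monitor* free_list = nullptr;  // plays the role of g_free_list
  static int free_count = 0;            // plays the role of g_om_free_count
  static std::mutex list_lock;          // plays the role of gListLock

  // "Newly allocated and deflated ObjectMonitors are prepended here."
  static void prepend_to_free_list(Monitor* m) {
    std::lock_guard<std::mutex> guard(list_lock);
    m->next = free_list;
    free_list = m;
    free_count++;
  }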
@@ -153 +153 @@
 //   synchronized (someobj) { .... ; notify(); }
 // That is, we find a notify() or notifyAll() call that immediately precedes
 // the monitorexit operation.  In that case the JIT could fuse the operations
 // into a single notifyAndExit() runtime primitive.
 
-bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
+bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(self->is_Java_thread(), "invariant");
   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
   NoSafepointVerifier nsv;
   if (obj == NULL) return false;  // slow-path for invalid obj
@@ -168 +168 @@
     // stack-locked by caller so by definition the implied waitset is empty.
     return true;
   }
 
   if (mark.has_monitor()) {
-    ObjectMonitor * const mon = mark.monitor();
+    ObjectMonitor* const mon = mark.monitor();
     assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
     if (mon->owner() != self) return false;  // slow-path for IMS exception
 
     if (mon->first_waiter() != NULL) {
       // We have one or more waiters. Since this is an inflated monitor
@@ -181 +181 @@
       if (all) {
         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
       } else {
         DTRACE_MONITOR_PROBE(notify, mon, obj, self);
       }
-      int tally = 0;
+      int free_count = 0;
       do {
         mon->INotify(self);
-        ++tally;
+        ++free_count;
       } while (mon->first_waiter() != NULL && all);
-      OM_PERFDATA_OP(Notifications, inc(tally));
+      OM_PERFDATA_OP(Notifications, inc(free_count));
     }
     return true;
   }
 
   // biased locking and any other IMS exception states take the slow-path
@@ -202 +202 @@
 // been too big if it were to have included support for the cases of inflated
 // recursive enter and exit, so they go here instead.
 // Note that we can't safely call AsyncPrintJavaStack() from within
 // quick_enter() as our thread state remains _in_Java.
 
-bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
+bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                      BasicLock * lock) {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(Self->is_Java_thread(), "invariant");
-  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
+  assert(self->is_Java_thread(), "invariant");
+  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
   NoSafepointVerifier nsv;
   if (obj == NULL) return false;       // Need to throw NPE
   const markWord mark = obj->mark();
 
   if (mark.has_monitor()) {
-    ObjectMonitor * const m = mark.monitor();
+    ObjectMonitor* const m = mark.monitor();
     assert(oopDesc::equals((oop) m->object(), obj), "invariant");
-    Thread * const owner = (Thread *) m->_owner;
+    Thread* const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
     // and observability
     // Case: light contention possibly amenable to TLE
     // Case: TLE inimical operations such as nested/recursive synchronization
 
-    if (owner == Self) {
+    if (owner == self) {
       m->_recursions++;
       return true;
     }
 
     // This Java Monitor is inflated so obj's header will never be
@@ -238 +238 @@
     // stack-locking in the object's header, the third check is for
     // recursive stack-locking in the displaced header in the BasicLock,
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markWord::unused_mark());
 
-    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
+    if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
       assert(m->_recursions == 0, "invariant");
       return true;
     }
   }
 
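
The fast path above claims an unowned inflated monitor with a single CAS on the owner field; anything else defers to the slow path. A hedged, self-contained illustration of the same idiom in portable C++ (std::atomic standing in for HotSpot's Atomic::replace_if_null):

  #include <atomic>

  struct Thread;  // opaque stand-in for the VM's Thread

  struct ToyMonitor {
    std::atomic<Thread*> owner{nullptr};
    int recursions = 0;

    bool quick_enter(Thread* self) {
      Thread* cur = owner.load(std::memory_order_relaxed);
      if (cur == self) {   // recursive enter by the current owner
        recursions++;
        return true;
      }
      Thread* expected = nullptr;  // succeed only if nobody owns it
      return cur == nullptr && owner.compare_exchange_strong(expected, self);
    }
  };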
@@ -323 +323 @@
         // after this thread entered the stack-lock recursively. When a
         // Java Monitor is inflated, we cannot safely walk the Java
         // Monitor owner's stack and update the BasicLocks because a
         // Java Monitor can be asynchronously inflated by a thread that
         // does not own the Java Monitor.
-        ObjectMonitor * m = mark.monitor();
+        ObjectMonitor* m = mark.monitor();
         assert(((oop)(m->object()))->mark() == mark, "invariant");
         assert(m->is_entered(THREAD), "invariant");
       }
     }
 #endif
@@ -414 +414 @@
 }
 
 // -----------------------------------------------------------------------------
 // Internal VM locks on java objects
 // standard constructor, allows locking failures
-ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
-  _dolock = doLock;
+ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
+  _dolock = do_lock;
   _thread = thread;
   _thread->check_for_valid_safepoint_state(false);
   _obj = obj;
 
   if (_dolock) {
@@ -455 +455 @@
   // and change this function back into a "void" func.
   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
   return dtrace_waited_probe(monitor, obj, THREAD);
 }
 
-void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
+void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
@@ -518 +518 @@
 
 struct SharedGlobals {
   char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
   // These are highly shared mostly-read variables.
   // To avoid false-sharing they need to be the sole occupants of a cache line.
-  volatile int stwRandom;
-  volatile int stwCycle;
+  volatile int stw_random;
+  volatile int stw_cycle;
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
   // Hot RW variable -- Sequester to avoid false-sharing
-  volatile int hcSequence;
+  volatile int hc_sequence;
   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
 };
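
The DEFINE_PAD_MINUS_SIZE lines above give the mostly-read pair and the hot hc_sequence counter their own cache lines. A rough modern-C++ rendering of the same layout intent (assuming a 64-byte line; this is not the HotSpot macro):

  #include <cstddef>

  constexpr std::size_t kCacheLine = 64;  // assumed DEFAULT_CACHE_LINE_SIZE

  struct SharedGlobalsSketch {
    alignas(kCacheLine) volatile int stw_random;   // mostly-read pair shares one line
    volatile int stw_cycle;
    alignas(kCacheLine) volatile int hc_sequence;  // hot RW field sequestered alone
    char tail_pad[kCacheLine - sizeof(int)];       // keep the next object off this line
  };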
 
 static SharedGlobals GVars;
 static int MonitorScavengeThreshold = 1000000;
 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
 
-static markWord ReadStableMark(oop obj) {
+static markWord read_stable_mark(oop obj) {
   markWord mark = obj->mark();
   if (!mark.is_being_inflated()) {
     return mark;       // normal fast-path return
   }
 
@@ -544 +544 @@
     if (!mark.is_being_inflated()) {
       return mark;    // normal fast-path return
     }
 
     // The object is being inflated by some other thread.
-    // The caller of ReadStableMark() must wait for inflation to complete.
+    // The caller of read_stable_mark() must wait for inflation to complete.
     // Avoid live-lock
     // TODO: consider calling SafepointSynchronize::do_call_back() while
     // spinning to see if there's a safepoint pending.  If so, immediately
     // yielding or blocking would be appropriate.  Avoid spinning while
     // there is a safepoint pending.
@@ -580 +580 @@
         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
         Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
         while (obj->mark() == markWord::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
-          // so we periodically call Self->_ParkEvent->park(1).
+          // so we periodically call self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
             Thread::current()->_ParkEvent->park(1);
           } else {
             os::naked_yield();
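
The excerpt elides how ix is computed, but the two asserts require it to land in [0, NINFLATIONLOCKS) with NINFLATIONLOCKS a power of two, which suggests a mask over some hash of the object's address. A hypothetical computation consistent with those constraints (an assumption, not the actual code):

  #include <cstdint>

  static const int kNumInflationLocks = 256;  // NINFLATIONLOCKS; power of two

  static int inflation_lock_index(const void* obj) {
    std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(obj);
    // Drop low alignment bits, then mask into the lock table.
    return static_cast<int>((addr >> 5) & (kNumInflationLocks - 1));
  }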
@@ -599 +599 @@
 }
 
 // hashCode() generation :
 //
 // Possibilities:
-// * MD5Digest of {obj,stwRandom}
-// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
+// * MD5Digest of {obj,stw_random}
+// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
 // * A DES- or AES-style SBox[] mechanism
 // * One of the Phi-based schemes, such as:
 //   2654435761 = 2^32 * Phi (golden ratio)
-//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
+//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
 // * A variation of Marsaglia's shift-xor RNG scheme.
-// * (obj ^ stwRandom) is appealing, but can result
+// * (obj ^ stw_random) is appealing, but can result
 //   in undesirable regularity in the hashCode values of adjacent objects
 //   (objects allocated back-to-back, in particular).  This could potentially
 //   result in hashtable collisions and reduced hashtable efficiency.
 //   There are simple ways to "diffuse" the middle address bits over the
 //   generated hashCode values:
 
-static inline intptr_t get_next_hash(Thread * Self, oop obj) {
+static inline intptr_t get_next_hash(Thread* self, oop obj) {
   intptr_t value = 0;
   if (hashCode == 0) {
     // This form uses global Park-Miller RNG.
     // On MP system we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random();
   } else if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
-    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
-    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
+    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
+    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
   } else if (hashCode == 2) {
     value = 1;            // for sensitivity testing
   } else if (hashCode == 3) {
-    value = ++GVars.hcSequence;
+    value = ++GVars.hc_sequence;
   } else if (hashCode == 4) {
     value = cast_from_oop<intptr_t>(obj);
   } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
-    unsigned t = Self->_hashStateX;
+    unsigned t = self->_hashStateX;
     t ^= (t << 11);
-    Self->_hashStateX = Self->_hashStateY;
-    Self->_hashStateY = Self->_hashStateZ;
-    Self->_hashStateZ = Self->_hashStateW;
-    unsigned v = Self->_hashStateW;
+    self->_hashStateX = self->_hashStateY;
+    self->_hashStateY = self->_hashStateZ;
+    self->_hashStateZ = self->_hashStateW;
+    unsigned v = self->_hashStateW;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
-    Self->_hashStateW = v;
+    self->_hashStateW = v;
     value = v;
   }
 
   value &= markWord::hash_mask;
   if (value == 0) value = 0xBAD;
   assert(value != markWord::no_hash, "invariant");
   return value;
 }
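
The default branch above is Marsaglia's xorshift with 128 bits of per-thread state (x, y, z, w); for any nonzero seed its period is 2^128 - 1. A self-contained demo of the identical recurrence with arbitrary seeds (not the JVM's per-thread values):

  #include <cstdio>

  struct XorShift128 {
    unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;
    unsigned next() {
      unsigned t = x ^ (x << 11);
      x = y; y = z; z = w;
      w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
      return w;
    }
  };

  int main() {
    XorShift128 rng;
    for (int i = 0; i < 4; i++) printf("%u\n", rng.next());
    return 0;
  }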
 
-intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
+intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
   if (UseBiasedLocking) {
     // NOTE: many places throughout the JVM do not expect a safepoint
     // to be taken here, in particular most operations on perm gen
     // objects. However, we only ever bias Java instances and all of
     // the call sites of identity_hash that might revoke biases have
     // been checked to make sure they can handle a safepoint. The
     // added check of the bias pattern is to avoid useless calls to
     // thread-local storage.
     if (obj->mark().has_bias_pattern()) {
       // Handle for oop obj in case of STW safepoint
-      Handle hobj(Self, obj);
+      Handle hobj(self, obj);
       // Relaxing assertion for bug 6320749.
       assert(Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
              "biases should not be seen by VM thread here");
       BiasedLocking::revoke(hobj, JavaThread::current());
@@ -680 +680 @@
   // hashCode() is a heap mutator ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
          !SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
-         Self->is_Java_thread() , "invariant");
+         self->is_Java_thread() , "invariant");
   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
-         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
+         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
 
   ObjectMonitor* monitor = NULL;
   markWord temp, test;
   intptr_t hash;
-  markWord mark = ReadStableMark(obj);
+  markWord mark = read_stable_mark(obj);
 
   // object should remain ineligible for biased locking
   assert(!mark.has_bias_pattern(), "invariant");
 
   if (mark.is_neutral()) {
     hash = mark.hash();               // this is a normal header
     if (hash != 0) {                  // if it has hash, just return it
       return hash;
     }
-    hash = get_next_hash(Self, obj);  // allocate a new hash code
+    hash = get_next_hash(self, obj);  // allocate a new hash code
     temp = mark.copy_set_hash(hash);  // merge the hash code into header
     // use (machine word version) atomic operation to install the hash
     test = obj->cas_set_mark(temp, mark);
     if (test == mark) {
       return hash;
@@ -716 +716 @@
     hash = temp.hash();
     if (hash != 0) {
       return hash;
     }
     // Skip to the following code to reduce code size
-  } else if (Self->is_lock_owned((address)mark.locker())) {
+  } else if (self->is_lock_owned((address)mark.locker())) {
     temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
     hash = temp.hash();                  // by current thread, check if the displaced
     if (hash != 0) {                     // header contains hash code
       return hash;
@@ -734 +734 @@
     // during an inflate() call so any change to that stack memory
     // may not propagate to other threads correctly.
   }
 
   // Inflate the monitor to set hash code
-  monitor = inflate(Self, obj, inflate_cause_hash_code);
+  monitor = inflate(self, obj, inflate_cause_hash_code);
   // Load displaced header and check it has hash code
   mark = monitor->header();
   assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
   hash = mark.hash();
   if (hash == 0) {
-    hash = get_next_hash(Self, obj);
+    hash = get_next_hash(self, obj);
     temp = mark.copy_set_hash(hash); // merge hash code into header
     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
     uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
     test = markWord(v);
     if (test != mark) {
-      // The only update to the ObjectMonitor's header/dmw field
-      // is to merge in the hash code. If someone adds a new usage
-      // of the header/dmw field, please update this code.
+      // The only non-deflation update to the ObjectMonitor's
+      // header/dmw field is to merge in the hash code. If someone
+      // adds a new usage of the header/dmw field, please update
+      // this code.
       hash = test.hash();
       assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
     }
   }
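
The cmpxchg above resolves the race where two threads compute different hash codes for the same object: exactly one CAS installs its value, and the loser adopts the winner's, so every caller returns the same hash. A generic sketch of that idempotent-publish idiom (illustrative only; the real code works on the packed markWord):

  #include <atomic>
  #include <cstdint>

  // Zero plays the role of "no hash yet", as in the mark word.
  static std::intptr_t install_hash(std::atomic<std::intptr_t>& slot,
                                    std::intptr_t candidate) {
    std::intptr_t expected = 0;
    if (slot.compare_exchange_strong(expected, candidate)) {
      return candidate;  // our candidate was published
    }
    return expected;     // raced and lost: adopt the winner's value
  }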
@@ -775 +776 @@
   }
 
   assert(thread == JavaThread::current(), "Can only be called on current thread");
   oop obj = h_obj();
 
-  markWord mark = ReadStableMark(obj);
+  markWord mark = read_stable_mark(obj);
 
   // Uncontended case, header points to stack
   if (mark.has_locker()) {
     return thread->is_lock_owned((address)mark.locker());
   }
@@ -814 +815 @@
            "biases should be revoked by now");
   }
 
   assert(self == JavaThread::current(), "Can only be called on current thread");
   oop obj = h_obj();
-  markWord mark = ReadStableMark(obj);
+  markWord mark = read_stable_mark(obj);
 
   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
   if (mark.has_locker()) {
     return self->is_lock_owned((address)mark.locker()) ?
       owner_self : owner_other;
@@ -826 +827 @@
 
   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
   // The Object:ObjectMonitor relationship is stable as long as we're
   // not at a safepoint.
   if (mark.has_monitor()) {
-    void * owner = mark.monitor()->_owner;
+    void* owner = mark.monitor()->_owner;
     if (owner == NULL) return owner_none;
     return (owner == self ||
             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
   }
 
@@ -851 +852 @@
   }
 
   oop obj = h_obj();
   address owner = NULL;
 
-  markWord mark = ReadStableMark(obj);
+  markWord mark = read_stable_mark(obj);
 
   // Uncontended case, header points to stack
   if (mark.has_locker()) {
     owner = (address) mark.locker();
   }
@@ -881 +882 @@
 }
 
 // Visitors ...
 
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
+  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
       oop object = (oop)mid->object();
       if (object != NULL) {
+        // Only process with closure if the object is set.
         closure->do_monitor(mid);
       }
     }
-    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
+    block = (PaddedObjectMonitor*)block->_next_om;
   }
-}
-
-// Get the next block in the block list.
-static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
-  assert(block->object() == CHAINMARKER, "must be a block header");
-  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
-  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
-  return block;
 }
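
monitors_iterate() walks blocks of _BLOCKSIZE monitors in which element [0] is reserved as the chain link (tagged with CHAINMARKER), so the inner loop deliberately stops before index 0. A stripped-down sketch of that traversal shape (invented types; the real loop is above):

  struct ToyMonitor { void* object = nullptr; };

  static const int kBlockSize = 32;  // stand-in for _BLOCKSIZE

  struct Block {
    ToyMonitor slots[kBlockSize];  // slots[0] is reserved for linkage
    Block* next = nullptr;         // the real code threads this through slots[0]
  };

  template <typename F>
  void for_each_monitor(Block* head, F f) {
    for (Block* b = head; b != nullptr; b = b->next) {
      for (int i = kBlockSize - 1; i > 0; i--) {  // skip reserved slot 0
        if (b->slots[i].object != nullptr) f(&b->slots[i]);
      }
    }
  }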
 
 static bool monitors_used_above_threshold() {
-  if (gMonitorPopulation == 0) {
+  if (g_om_population == 0) {
     return false;
   }
-  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
-  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
+  int monitors_used = g_om_population - g_om_free_count;
+  int monitor_usage = (monitors_used * 100LL) / g_om_population;
   return monitor_usage > MonitorUsedDeflationThreshold;
 }
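
Worked example for the integer percentage above (values invented): with g_om_population = 1000 and g_om_free_count = 150, monitors_used = 850 and monitor_usage = (850 * 100) / 1000 = 85, so MonitorUsedDeflationThreshold = 80 would report cleanup needed while 90 would not. The 100LL literal widens the multiply to 64 bits so a large population cannot overflow an int.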
 
 bool ObjectSynchronizer::is_cleanup_needed() {
   if (MonitorUsedDeflationThreshold > 0) {
@@ -927 +921 @@
   global_used_oops_do(f);
 }
 
 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  list_oops_do(gOmInUseList, f);
+  list_oops_do(g_om_in_use_list, f);
 }
 
 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  list_oops_do(thread->omInUseList, f);
+  list_oops_do(thread->om_in_use_list, f);
 }
 
 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   ObjectMonitor* mid;
-  for (mid = list; mid != NULL; mid = mid->FreeNext) {
+  for (mid = list; mid != NULL; mid = mid->_next_om) {
     if (mid->object() != NULL) {
       f->do_oop((oop*)mid->object_addr());
     }
   }
 }
 
 
 // -----------------------------------------------------------------------------
 // ObjectMonitor Lifecycle
 // -----------------------
-// Inflation unlinks monitors from the global gFreeList and
+// Inflation unlinks monitors from the global g_free_list and
 // associates them with objects.  Deflation -- which occurs at
 // STW-time -- disassociates idle monitors from objects.  Such
-// scavenged monitors are returned to the gFreeList.
+// scavenged monitors are returned to the g_free_list.
 //
 // The global list is protected by gListLock.  All the critical sections
 // are short and operate in constant-time.
 //
 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
 //
 // Lifecycle:
 // --   unassigned and on the global free list
-// --   unassigned and on a thread's private omFreeList
+// --   unassigned and on a thread's private om_free_list
 // --   assigned to an object.  The object is inflated and the mark refers
 //      to the objectmonitor.
 
 
 // Constraining monitor pool growth via MonitorBound ...
+//
+// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
 //
 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
 // the rate of scavenging is driven primarily by GC.  As such, we can find
 // an inordinate number of monitors in circulation.
 // To avoid that scenario we can artificially induce a STW safepoint
@@ -981 +977 @@
 // we could just loop. In addition, if MonitorBound is set to a low value
 // we'll incur more safepoints, which are harmful to performance.
 // See also: GuaranteedSafepointInterval
 //
 // The current implementation uses asynchronous VM operations.
-
-static void InduceScavenge(Thread * Self, const char * Whence) {
+//
+// If MonitorBound is set, the boundary applies to
+//     (g_om_population - g_om_free_count)
+// i.e., if there are not enough ObjectMonitors on the global free list,
+// then a safepoint deflation is induced. Picking a good MonitorBound value
+// is non-trivial.
+
+static void InduceScavenge(Thread* self, const char * Whence) {
   // Induce STW safepoint to trim monitors
   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
   // More precisely, trigger an asynchronous STW safepoint as the number
   // of active monitors passes the specified threshold.
   // TODO: assert thread state is reasonable
@@ -998 +1000 @@
     // The VMThread will delete the op when completed.
     VMThread::execute(new VM_ScavengeMonitors());
   }
 }
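
Worked example for the bound described above (values invented): with MonitorBound = 10000, g_om_population = 12048, and g_om_free_count = 1500, the in-use count is 12048 - 1500 = 10548 > 10000, so om_alloc() defers an InduceScavenge() call, which requests the asynchronous VM_ScavengeMonitors safepoint rather than safepointing directly.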
 
-ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
+ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
   // A large MAXPRIVATE value reduces both list lock contention
   // and list coherency traffic, but also tends to increase the
-  // number of objectMonitors in circulation as well as the STW
+  // number of ObjectMonitors in circulation as well as the STW
   // scavenge costs.  As usual, we lean toward time in space-time
   // tradeoffs.
   const int MAXPRIVATE = 1024;
   stringStream ss;
   for (;;) {
-    ObjectMonitor * m;
+    ObjectMonitor* m;
 
-    // 1: try to allocate from the thread's local omFreeList.
+    // 1: try to allocate from the thread's local om_free_list.
     // Threads will attempt to allocate first from their local list, then
     // from the global list, and only after those attempts fail will the thread
     // attempt to instantiate new monitors.   Thread-local free lists take
     // heat off the gListLock and improve allocation latency, as well as reducing
     // coherency traffic on the shared global list.
-    m = Self->omFreeList;
+    m = self->om_free_list;
     if (m != NULL) {
-      Self->omFreeList = m->FreeNext;
-      Self->omFreeCount--;
+      self->om_free_list = m->_next_om;
+      self->om_free_count--;
       guarantee(m->object() == NULL, "invariant");
-      m->FreeNext = Self->omInUseList;
-      Self->omInUseList = m;
-      Self->omInUseCount++;
+      m->_next_om = self->om_in_use_list;
+      self->om_in_use_list = m;
+      self->om_in_use_count++;
       return m;
     }
 
-    // 2: try to allocate from the global gFreeList
+    // 2: try to allocate from the global g_free_list
     // CONSIDER: use muxTry() instead of muxAcquire().
     // If the muxTry() fails then drop immediately into case 3.
     // If we're using thread-local free lists then try
     // to reprovision the caller's free list.
-    if (gFreeList != NULL) {
-      // Reprovision the thread's omFreeList.
+    if (g_free_list != NULL) {
+      // Reprovision the thread's om_free_list.
       // Use bulk transfers to reduce the allocation rate and heat
       // on various locks.
-      Thread::muxAcquire(&gListLock, "omAlloc(1)");
-      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
-        gMonitorFreeCount--;
-        ObjectMonitor * take = gFreeList;
-        gFreeList = take->FreeNext;
+      Thread::muxAcquire(&gListLock, "om_alloc(1)");
+      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
+        g_om_free_count--;
+        ObjectMonitor* take = g_free_list;
+        g_free_list = take->_next_om;
         guarantee(take->object() == NULL, "invariant");
         take->Recycle();
-        omRelease(Self, take, false);
+        om_release(self, take, false);
       }
       Thread::muxRelease(&gListLock);
-      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
-      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
+      self->om_free_provision += 1 + (self->om_free_provision/2);
+      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
 
       const int mx = MonitorBound;
-      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
-        // We can't safely induce a STW safepoint from omAlloc() as our thread
+      if (mx > 0 && (g_om_population-g_om_free_count) > mx) {
+        // Not enough ObjectMonitors on the global free list.
+        // We can't safely induce a STW safepoint from om_alloc() as our thread
         // state may not be appropriate for such activities and callers may hold
         // naked oops, so instead we defer the action.
-        InduceScavenge(Self, "omAlloc");
+        InduceScavenge(self, "om_alloc");
       }
       continue;
     }
 
     // 3: allocate a block of new ObjectMonitors
     // Both the local and global free lists are empty -- resort to malloc().
-    // In the current implementation objectMonitors are TSM - immortal.
+    // In the current implementation ObjectMonitors are TSM - immortal.
     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
     // each ObjectMonitor to start at the beginning of a cache line,
     // so we use align_up().
     // A better solution would be to use C++ placement-new.
     // BEWARE: As it stands currently, we don't run the ctors!
     assert(_BLOCKSIZE > 1, "invariant");
-    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
-    PaddedEnd<ObjectMonitor> * temp;
+    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
+    PaddedObjectMonitor* temp;
     size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
-    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
-                                                      mtInternal);
-    temp = (PaddedEnd<ObjectMonitor> *)
-             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
+    void* real_malloc_addr = (void*)NEW_C_HEAP_ARRAY(char, aligned_size,
+                                                     mtInternal);
+    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
 
     // NOTE: (almost) no way to recover if allocation failed.
     // We might be able to induce a STW safepoint and scavenge enough
-    // objectMonitors to permit progress.
+    // ObjectMonitors to permit progress.
     if (temp == NULL) {
       vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                             "Allocate ObjectMonitors");
     }
     (void)memset((void *) temp, 0, neededsize);
 
     // Format the block.
     // initialize the linked list, each monitor points to its next
     // forming the single linked free list, the very first monitor
     // will point to the next block, which forms the block list.
-    // The trick of using the 1st element in the block as gBlockList
+    // The trick of using the 1st element in the block as g_block_list
     // linkage should be reconsidered.  A better implementation would
     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
 
     for (int i = 1; i < _BLOCKSIZE; i++) {
-      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
+      temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
     }
 
     // terminate the last monitor as the end of list
-    temp[_BLOCKSIZE - 1].FreeNext = NULL;
+    temp[_BLOCKSIZE - 1]._next_om = NULL;
 
     // Element [0] is reserved for global list linkage
     temp[0].set_object(CHAINMARKER);
 
     // Consider carving out this thread's current request from the
     // block in hand.  This avoids some lock traffic and redundant
     // list activity.
 
-    // Acquire the gListLock to manipulate gBlockList and gFreeList.
+    // Acquire the gListLock to manipulate g_block_list and g_free_list.
     // An Oyama-Taura-Yonezawa scheme might be more efficient.
-    Thread::muxAcquire(&gListLock, "omAlloc(2)");
-    gMonitorPopulation += _BLOCKSIZE-1;
-    gMonitorFreeCount += _BLOCKSIZE-1;
+    Thread::muxAcquire(&gListLock, "om_alloc(2)");
+    g_om_population += _BLOCKSIZE-1;
+    g_om_free_count += _BLOCKSIZE-1;
 
-    // Add the new block to the list of extant blocks (gBlockList).
-    // The very first objectMonitor in a block is reserved and dedicated.
+    // Add the new block to the list of extant blocks (g_block_list).
+    // The very first ObjectMonitor in a block is reserved and dedicated.
     // It serves as blocklist "next" linkage.
-    temp[0].FreeNext = gBlockList;
-    // There are lock-free uses of gBlockList so make sure that
-    // the previous stores happen before we update gBlockList.
-    OrderAccess::release_store(&gBlockList, temp);
+    temp[0]._next_om = g_block_list;
+    // There are lock-free uses of g_block_list so make sure that
+    // the previous stores happen before we update g_block_list.
+    OrderAccess::release_store(&g_block_list, temp);
 
-    // Add the new string of objectMonitors to the global free list
-    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
-    gFreeList = temp + 1;
+    // Add the new string of ObjectMonitors to the global free list
+    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
+    g_free_list = temp + 1;
     Thread::muxRelease(&gListLock);
   }
 }
  1131 
  1133 
  1132 // Place "m" on the caller's private per-thread omFreeList.
  1134 // Place "m" on the caller's private per-thread om_free_list.
  1133 // In practice there's no need to clamp or limit the number of
  1135 // In practice there's no need to clamp or limit the number of
  1134 // monitors on a thread's omFreeList as the only time we'll call
  1136 // monitors on a thread's om_free_list as the only non-allocation time
  1135 // omRelease is to return a monitor to the free list after a CAS
  1137 // we'll call om_release() is to return a monitor to the free list after
  1136 // attempt failed.  This doesn't allow unbounded #s of monitors to
  1138 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
  1137 // accumulate on a thread's free list.
  1139 // accumulate on a thread's free list.
  1138 //
  1140 //
  1139 // Key constraint: all ObjectMonitors on a thread's free list and the global
  1141 // Key constraint: all ObjectMonitors on a thread's free list and the global
  1140 // free list must have their object field set to null. This prevents the
  1142 // free list must have their object field set to null. This prevents the
  1141 // scavenger -- deflate_monitor_list() -- from reclaiming them.
  1143 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
  1142 
  1144 // are trying to release them.
  1143 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
  1145 
  1144                                    bool fromPerThreadAlloc) {
  1146 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
       
  1147                                     bool from_per_thread_alloc) {
  1145   guarantee(m->header().value() == 0, "invariant");
  1148   guarantee(m->header().value() == 0, "invariant");
  1146   guarantee(m->object() == NULL, "invariant");
  1149   guarantee(m->object() == NULL, "invariant");
  1147   stringStream ss;
  1150   stringStream ss;
  1148   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
  1151   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
  1149             "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
  1152             "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
  1150             m->_recursions);
  1153             m->_recursions);
  1151   // Remove from omInUseList
  1154   // _next_om is used for both per-thread in-use and free lists so
  1152   if (fromPerThreadAlloc) {
  1155   // we have to remove 'm' from the in-use list first (as needed).
       
  1156   if (from_per_thread_alloc) {
       
  1157     // Need to remove 'm' from om_in_use_list.
  1153     ObjectMonitor* cur_mid_in_use = NULL;
  1158     ObjectMonitor* cur_mid_in_use = NULL;
  1154     bool extracted = false;
  1159     bool extracted = false;
  1155     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
  1160     for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
  1156       if (m == mid) {
  1161       if (m == mid) {
  1157         // extract from per-thread in-use list
  1162         // extract from per-thread in-use list
  1158         if (mid == Self->omInUseList) {
  1163         if (mid == self->om_in_use_list) {
  1159           Self->omInUseList = mid->FreeNext;
  1164           self->om_in_use_list = mid->_next_om;
  1160         } else if (cur_mid_in_use != NULL) {
  1165         } else if (cur_mid_in_use != NULL) {
  1161           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
  1166           cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
  1162         }
  1167         }
  1163         extracted = true;
  1168         extracted = true;
  1164         Self->omInUseCount--;
  1169         self->om_in_use_count--;
  1165         break;
  1170         break;
  1166       }
  1171       }
  1167     }
  1172     }
  1168     assert(extracted, "Should have extracted from in-use list");
  1173     assert(extracted, "Should have extracted from in-use list");
  1169   }
  1174   }
  1170 
  1175 
  1171   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  1176   m->_next_om = self->om_free_list;
  1172   m->FreeNext = Self->omFreeList;
  1177   self->om_free_list = m;
  1173   Self->omFreeList = m;
  1178   self->om_free_count++;
  1174   Self->omFreeCount++;
  1179 }
  1175 }
  1180 
  1176 
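
The extraction above is a linear scan of the per-thread in-use list. The same unlink pattern, factored into a stand-alone helper for clarity (hypothetical; not part of this change):

    static bool unlink_monitor_sketch(ObjectMonitor** list_p, ObjectMonitor* m) {
      ObjectMonitor* prev = NULL;
      for (ObjectMonitor* cur = *list_p; cur != NULL; prev = cur, cur = cur->_next_om) {
        if (cur != m) continue;
        if (prev == NULL) {
          *list_p = cur->_next_om;         // 'm' was the list head
        } else {
          prev->_next_om = cur->_next_om;  // unlink an interior node
        }
        return true;
      }
      return false;                        // 'm' was not on the list
    }
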
  1181 // Return ObjectMonitors on a moribund thread's free and in-use
  1177 // Return the monitors of a moribund thread's local free list to
  1182 // lists to the appropriate global lists. The ObjectMonitors on the
  1178 // the global free list.  Typically a thread calls omFlush() when
  1183 // per-thread in-use list may still be in use by other threads.
  1179 // it's dying.  We could also consider having the VM thread steal
       
  1180 // monitors from threads that have not run java code over a few
       
  1181 // consecutive STW safepoints.  Relatedly, we might decay
       
  1182 // omFreeProvision at STW safepoints.
       
  1183 //
  1184 //
  1184 // Also return the monitors of a moribund thread's omInUseList to
  1185 // We currently call om_flush() from Threads::remove() before the
  1185 // a global gOmInUseList under the global list lock so these
  1186 // thread has been excised from the thread list and is no longer a
  1186 // will continue to be scanned.
  1187 // mutator. This means that om_flush() cannot run concurrently with
  1187 //
  1188 // a safepoint and interleave with deflate_idle_monitors(). In
  1188 // We currently call omFlush() from Threads::remove() _before the thread
  1189 // particular, this ensures that the thread's in-use monitors are
  1189 // has been excised from the thread list and is no longer a mutator.
  1190 // scanned by a GC safepoint, either via Thread::oops_do() (before
  1190 // This means that omFlush() cannot run concurrently with a safepoint and
  1191 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
  1191 // interleave with the deflate_idle_monitors scavenge operator. In particular,
  1192 // om_flush() is called).
  1192 // this ensures that the thread's monitors are scanned by a GC safepoint,
  1193 
  1193 // either via Thread::oops_do() (if safepoint happens before omFlush()) or via
  1194 void ObjectSynchronizer::om_flush(Thread* self) {
  1194 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
  1195   ObjectMonitor* free_list = self->om_free_list;
  1195 // monitors have been transferred to the global in-use list).
  1196   ObjectMonitor* free_tail = NULL;
  1196 
  1197   int free_count = 0;
  1197 void ObjectSynchronizer::omFlush(Thread * Self) {
  1198   if (free_list != NULL) {
  1198   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  1199     ObjectMonitor* s;
  1199   ObjectMonitor * tail = NULL;
  1200     // The thread is going away. Set 'free_tail' to the last per-thread free
  1200   int tally = 0;
  1201     // monitor which will be linked to g_free_list below under the gListLock.
  1201   if (list != NULL) {
       
  1202     ObjectMonitor * s;
       
  1203     // The thread is going away. Set 'tail' to the last per-thread free
       
  1204     // monitor which will be linked to gFreeList below under the gListLock.
       
  1205     stringStream ss;
  1202     stringStream ss;
  1206     for (s = list; s != NULL; s = s->FreeNext) {
  1203     for (s = free_list; s != NULL; s = s->_next_om) {
  1207       tally++;
  1204       free_count++;
  1208       tail = s;
  1205       free_tail = s;
  1209       guarantee(s->object() == NULL, "invariant");
  1206       guarantee(s->object() == NULL, "invariant");
  1210       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
  1207       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
  1211     }
  1208     }
  1212     guarantee(tail != NULL, "invariant");
  1209     guarantee(free_tail != NULL, "invariant");
  1213     assert(Self->omFreeCount == tally, "free-count off");
  1210     assert(self->om_free_count == free_count, "free-count off");
  1214     Self->omFreeList = NULL;
  1211     self->om_free_list = NULL;
  1215     Self->omFreeCount = 0;
  1212     self->om_free_count = 0;
  1216   }
  1213   }
  1217 
  1214 
  1218   ObjectMonitor * inUseList = Self->omInUseList;
  1215   ObjectMonitor* in_use_list = self->om_in_use_list;
  1219   ObjectMonitor * inUseTail = NULL;
  1216   ObjectMonitor* in_use_tail = NULL;
  1220   int inUseTally = 0;
  1217   int in_use_count = 0;
  1221   if (inUseList != NULL) {
  1218   if (in_use_list != NULL) {
       
  1219     // The thread is going away, however the ObjectMonitors on the
       
  1220     // om_in_use_list may still be in-use by other threads. Link
       
  1221     // them to in_use_tail, which will be linked into the global
       
  1222     // in-use list g_om_in_use_list below, under the gListLock.
  1222     ObjectMonitor *cur_om;
  1223     ObjectMonitor *cur_om;
  1223     // The thread is going away, however the omInUseList inflated
  1224     for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
  1224     // monitors may still be in-use by other threads.
  1225       in_use_tail = cur_om;
  1225     // Link them to inUseTail, which will be linked into the global in-use list
  1226       in_use_count++;
  1226     // gOmInUseList below, under the gListLock
  1227     }
  1227     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
  1228     guarantee(in_use_tail != NULL, "invariant");
  1228       inUseTail = cur_om;
  1229     assert(self->om_in_use_count == in_use_count, "in-use count off");
  1229       inUseTally++;
  1230     self->om_in_use_list = NULL;
  1230     }
  1231     self->om_in_use_count = 0;
  1231     guarantee(inUseTail != NULL, "invariant");
  1232   }
  1232     assert(Self->omInUseCount == inUseTally, "in-use count off");
  1233 
  1233     Self->omInUseList = NULL;
  1234   Thread::muxAcquire(&gListLock, "om_flush");
  1234     Self->omInUseCount = 0;
  1235   if (free_tail != NULL) {
  1235   }
  1236     free_tail->_next_om = g_free_list;
  1236 
  1237     g_free_list = free_list;
  1237   Thread::muxAcquire(&gListLock, "omFlush");
  1238     g_om_free_count += free_count;
  1238   if (tail != NULL) {
  1239   }
  1239     tail->FreeNext = gFreeList;
  1240 
  1240     gFreeList = list;
  1241   if (in_use_tail != NULL) {
  1241     gMonitorFreeCount += tally;
  1242     in_use_tail->_next_om = g_om_in_use_list;
  1242   }
  1243     g_om_in_use_list = in_use_list;
  1243 
  1244     g_om_in_use_count += in_use_count;
  1244   if (inUseTail != NULL) {
       
  1245     inUseTail->FreeNext = gOmInUseList;
       
  1246     gOmInUseList = inUseList;
       
  1247     gOmInUseCount += inUseTally;
       
  1248   }
  1245   }
  1249 
  1246 
  1250   Thread::muxRelease(&gListLock);
  1247   Thread::muxRelease(&gListLock);
  1251 
  1248 
  1252   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1249   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1253   LogStreamHandle(Info, monitorinflation) lsh_info;
  1250   LogStreamHandle(Info, monitorinflation) lsh_info;
  1254   LogStream * ls = NULL;
  1251   LogStream* ls = NULL;
  1255   if (log_is_enabled(Debug, monitorinflation)) {
  1252   if (log_is_enabled(Debug, monitorinflation)) {
  1256     ls = &lsh_debug;
  1253     ls = &lsh_debug;
  1257   } else if ((tally != 0 || inUseTally != 0) &&
  1254   } else if ((free_count != 0 || in_use_count != 0) &&
  1258              log_is_enabled(Info, monitorinflation)) {
  1255              log_is_enabled(Info, monitorinflation)) {
  1259     ls = &lsh_info;
  1256     ls = &lsh_info;
  1260   }
  1257   }
  1261   if (ls != NULL) {
  1258   if (ls != NULL) {
  1262     ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
  1259     ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
  1263                  ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
  1260                  ", in_use_count=%d" ", om_free_provision=%d",
  1264                  p2i(Self), tally, inUseTally, Self->omFreeProvision);
  1261                  p2i(self), free_count, in_use_count, self->om_free_provision);
  1265   }
  1262   }
  1266 }
  1263 }
  1267 
  1264 
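
Both transfers above are constant-time splices under gListLock: only the local tail's _next_om and the global head (plus a count) change, regardless of list length. A condensed sketch of the pattern, with hypothetical local_head/local_tail/local_count standing in for either pair of locals:

    Thread::muxAcquire(&gListLock, "splice-sketch");
    local_tail->_next_om = g_free_list;  // hang the old global list off our tail
    g_free_list = local_head;            // publish the whole local list at once
    g_om_free_count += local_count;      // the count travels with the monitors
    Thread::muxRelease(&gListLock);
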
  1268 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
  1265 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
  1269                                        const oop obj,
  1266                                        const oop obj,
  1285     return;
  1282     return;
  1286   }
  1283   }
  1287   inflate(Thread::current(), obj, inflate_cause_vm_internal);
  1284   inflate(Thread::current(), obj, inflate_cause_vm_internal);
  1288 }
  1285 }
  1289 
  1286 
  1290 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
  1287 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
  1291                                            oop object,
  1288                                            oop object,
  1292                                            const InflateCause cause) {
  1289                                            const InflateCause cause) {
  1293   // Inflate mutates the heap ...
  1290   // Inflate mutates the heap ...
  1294   // Relaxing assertion for bug 6320749.
  1291   // Relaxing assertion for bug 6320749.
  1295   assert(Universe::verify_in_progress() ||
  1292   assert(Universe::verify_in_progress() ||
  1308     // *  Neutral      - aggressively inflate the object.
  1305     // *  Neutral      - aggressively inflate the object.
  1309     // *  BIASED       - Illegal.  We should never see this
  1306     // *  BIASED       - Illegal.  We should never see this
  1310 
  1307 
  1311     // CASE: inflated
  1308     // CASE: inflated
  1312     if (mark.has_monitor()) {
  1309     if (mark.has_monitor()) {
  1313       ObjectMonitor * inf = mark.monitor();
  1310       ObjectMonitor* inf = mark.monitor();
  1314       markWord dmw = inf->header();
  1311       markWord dmw = inf->header();
  1315       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
  1312       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
  1316       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
  1313       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
  1317       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
  1314       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
  1318       return inf;
  1315       return inf;
  1323     // Only that thread can complete inflation -- other threads must wait.
  1320     // Only that thread can complete inflation -- other threads must wait.
  1324     // The INFLATING value is transient.
  1321     // The INFLATING value is transient.
  1325     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
  1322     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
  1326     // We could always eliminate polling by parking the thread on some auxiliary list.
  1323     // We could always eliminate polling by parking the thread on some auxiliary list.
  1327     if (mark == markWord::INFLATING()) {
  1324     if (mark == markWord::INFLATING()) {
  1328       ReadStableMark(object);
  1325       read_stable_mark(object);
  1329       continue;
  1326       continue;
  1330     }
  1327     }
  1331 
  1328 
  1332     // CASE: stack-locked
  1329     // CASE: stack-locked
  1333     // Could be stack-locked either by this thread or by some other thread.
  1330     // Could be stack-locked either by this thread or by some other thread.
  1342     // We now use per-thread private objectmonitor free lists.
  1339     // We now use per-thread private objectmonitor free lists.
  1343     // These lists are reprovisioned from the global free list outside the
  1340     // These lists are reprovisioned from the global free list outside the
  1344     // critical INFLATING...ST interval.  A thread can transfer
  1341     // critical INFLATING...ST interval.  A thread can transfer
  1345     // multiple objectmonitors en masse from the global free list to its local free list.
  1342     // multiple objectmonitors en masse from the global free list to its local free list.
  1346     // This reduces coherency traffic and lock contention on the global free list.
  1343     // This reduces coherency traffic and lock contention on the global free list.
  1347     // Using such local free lists, it doesn't matter if the omAlloc() call appears
  1344     // Using such local free lists, it doesn't matter if the om_alloc() call appears
  1348     // before or after the CAS(INFLATING) operation.
  1345     // before or after the CAS(INFLATING) operation.
  1349     // See the comments in omAlloc().
  1346     // See the comments in om_alloc().
  1350 
  1347 
  1351     LogStreamHandle(Trace, monitorinflation) lsh;
  1348     LogStreamHandle(Trace, monitorinflation) lsh;
  1352 
  1349 
  1353     if (mark.has_locker()) {
  1350     if (mark.has_locker()) {
  1354       ObjectMonitor * m = omAlloc(Self);
  1351       ObjectMonitor* m = om_alloc(self);
  1355       // Optimistically prepare the objectmonitor - anticipate successful CAS
  1352       // Optimistically prepare the objectmonitor - anticipate successful CAS
  1356       // We do this before the CAS in order to minimize the length of time
  1353       // We do this before the CAS in order to minimize the length of time
  1357       // in which INFLATING appears in the mark.
  1354       // in which INFLATING appears in the mark.
  1358       m->Recycle();
  1355       m->Recycle();
  1359       m->_Responsible  = NULL;
  1356       m->_Responsible  = NULL;
  1360       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
  1357       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
  1361 
  1358 
  1362       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
  1359       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
  1363       if (cmp != mark) {
  1360       if (cmp != mark) {
  1364         omRelease(Self, m, true);
  1361         om_release(self, m, true);
  1365         continue;       // Interference -- just retry
  1362         continue;       // Interference -- just retry
  1366       }
  1363       }
  1367 
  1364 
  1368       // We've successfully installed INFLATING (0) into the mark-word.
  1365       // We've successfully installed INFLATING (0) into the mark-word.
  1369       // This is the only case where 0 will appear in a mark-word.
  1366       // This is the only case where 0 will appear in a mark-word.
  1372       //
  1369       //
  1373       // Why do we CAS a 0 into the mark-word instead of just CASing the
  1370       // Why do we CAS a 0 into the mark-word instead of just CASing the
  1374       // mark-word from the stack-locked value directly to the new inflated state?
  1371       // mark-word from the stack-locked value directly to the new inflated state?
  1375       // Consider what happens when a thread unlocks a stack-locked object.
  1372       // Consider what happens when a thread unlocks a stack-locked object.
  1376       // It attempts to use CAS to swing the displaced header value from the
  1373       // It attempts to use CAS to swing the displaced header value from the
  1377       // on-stack basiclock back into the object header.  Recall also that the
  1374       // on-stack BasicLock back into the object header.  Recall also that the
  1378       // header value (hash code, etc) can reside in (a) the object header, or
  1375       // header value (hash code, etc) can reside in (a) the object header, or
  1379       // (b) a displaced header associated with the stack-lock, or (c) a displaced
  1376       // (b) a displaced header associated with the stack-lock, or (c) a displaced
  1380       // header in an objectMonitor.  The inflate() routine must copy the header
  1377       // header in an ObjectMonitor.  The inflate() routine must copy the header
  1381       // value from the basiclock on the owner's stack to the objectMonitor, all
  1378       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
  1382       // the while preserving the hashCode stability invariants.  If the owner
  1379       // the while preserving the hashCode stability invariants.  If the owner
  1383       // decides to release the lock while the value is 0, the unlock will fail
  1380       // decides to release the lock while the value is 0, the unlock will fail
  1384       // and control will eventually pass from slow_exit() to inflate.  The owner
  1381       // and control will eventually pass from slow_exit() to inflate.  The owner
  1385       // will then spin, waiting for the 0 value to disappear.   Put another way,
  1382       // will then spin, waiting for the 0 value to disappear.   Put another way,
  1386       // the 0 causes the owner to stall if the owner happens to try to
  1383       // the 0 causes the owner to stall if the owner happens to try to
  1387       // drop the lock (restoring the header from the basiclock to the object)
  1384       // drop the lock (restoring the header from the BasicLock to the object)
  1388       // while inflation is in-progress.  This protocol avoids races that
  1385       // while inflation is in-progress.  This protocol avoids races that
  1389       // would otherwise permit hashCode values to change or "flicker" for an object.
  1386       // would otherwise permit hashCode values to change or "flicker" for an object.
  1390       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
  1387       // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
  1391       // 0 serves as a "BUSY" inflate-in-progress indicator.
  1388       // 0 serves as a "BUSY" inflate-in-progress indicator.
  1392 
  1389 
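
While the mark is 0, competing threads simply wait for the transient state to clear. A hedged sketch of the effect; the production back-off (spin, yield, park) lives in read_stable_mark():

      while (object->mark() == markWord::INFLATING()) {
        // Poll until the inflating thread installs the real monitor pointer.
        os::naked_yield();
      }
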
  1402 
  1399 
  1403       // Set up monitor fields to proper values -- prepare the monitor
  1400       // Set up monitor fields to proper values -- prepare the monitor
  1404       m->set_header(dmw);
  1401       m->set_header(dmw);
  1405 
  1402 
  1406       // Optimization: if the mark.locker stack address is associated
  1403       // Optimization: if the mark.locker stack address is associated
  1407       // with this thread we could simply set m->_owner = Self.
  1404       // with this thread we could simply set m->_owner = self.
  1408       // Note that a thread can inflate an object
  1405       // Note that a thread can inflate an object
  1409       // that it has stack-locked -- as might happen in wait() -- directly
  1406       // that it has stack-locked -- as might happen in wait() -- directly
  1410       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
  1407       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
  1411       m->set_owner(mark.locker());
  1408       m->set_owner(mark.locker());
  1412       m->set_object(object);
  1409       m->set_object(object);
  1419 
  1416 
  1420       // Hopefully the performance counters are allocated on distinct cache lines
  1417       // Hopefully the performance counters are allocated on distinct cache lines
  1421       // to avoid false sharing on MP systems ...
  1418       // to avoid false sharing on MP systems ...
  1422       OM_PERFDATA_OP(Inflations, inc());
  1419       OM_PERFDATA_OP(Inflations, inc());
  1423       if (log_is_enabled(Trace, monitorinflation)) {
  1420       if (log_is_enabled(Trace, monitorinflation)) {
  1424         ResourceMark rm(Self);
  1421         ResourceMark rm(self);
  1425         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
  1422         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
  1426                      INTPTR_FORMAT ", type='%s'", p2i(object),
  1423                      INTPTR_FORMAT ", type='%s'", p2i(object),
  1427                      object->mark().value(), object->klass()->external_name());
  1424                      object->mark().value(), object->klass()->external_name());
  1428       }
  1425       }
  1429       if (event.should_commit()) {
  1426       if (event.should_commit()) {
  1433     }
  1430     }
  1434 
  1431 
  1435     // CASE: neutral
  1432     // CASE: neutral
  1436     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
  1433     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
  1437     // If we know we're inflating for entry it's better to inflate by swinging a
  1434     // If we know we're inflating for entry it's better to inflate by swinging a
  1438     // pre-locked objectMonitor pointer into the object header.   A successful
  1435     // pre-locked ObjectMonitor pointer into the object header.   A successful
  1439     // CAS inflates the object *and* confers ownership to the inflating thread.
  1436     // CAS inflates the object *and* confers ownership to the inflating thread.
  1440     // In the current implementation we use a 2-step mechanism where we CAS()
  1437     // In the current implementation we use a 2-step mechanism where we CAS()
  1441     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
  1438     // to inflate and then CAS() again to try to swing _owner from NULL to self.
  1442     // An inflateTry() method that we could call from enter() would be useful.
  1439     // An inflateTry() method that we could call from enter() would be useful.
  1443 
  1440 
  1444     // Catch if the object's header is not neutral (not locked and
  1441     // Catch if the object's header is not neutral (not locked and
  1445     // not marked is what we care about here).
  1442     // not marked is what we care about here).
  1446     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  1443     assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  1447     ObjectMonitor * m = omAlloc(Self);
  1444     ObjectMonitor* m = om_alloc(self);
  1448     // prepare m for installation - set monitor to initial state
  1445     // prepare m for installation - set monitor to initial state
  1449     m->Recycle();
  1446     m->Recycle();
  1450     m->set_header(mark);
  1447     m->set_header(mark);
  1451     m->set_object(object);
  1448     m->set_object(object);
  1452     m->_Responsible  = NULL;
  1449     m->_Responsible  = NULL;
  1454 
  1451 
  1455     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
  1452     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
  1456       m->set_header(markWord::zero());
  1453       m->set_header(markWord::zero());
  1457       m->set_object(NULL);
  1454       m->set_object(NULL);
  1458       m->Recycle();
  1455       m->Recycle();
  1459       omRelease(Self, m, true);
  1456       om_release(self, m, true);
  1460       m = NULL;
  1457       m = NULL;
  1461       continue;
  1458       continue;
  1462       // interference - the markword changed - just retry.
  1459       // interference - the markword changed - just retry.
  1463       // The state-transitions are one-way, so there's no chance of
  1460       // The state-transitions are one-way, so there's no chance of
  1464       // live-lock -- "Inflated" is an absorbing state.
  1461       // live-lock -- "Inflated" is an absorbing state.
  1466 
  1463 
  1467     // Hopefully the performance counters are allocated on distinct
  1464     // Hopefully the performance counters are allocated on distinct
  1468     // cache lines to avoid false sharing on MP systems ...
  1465     // cache lines to avoid false sharing on MP systems ...
  1469     OM_PERFDATA_OP(Inflations, inc());
  1466     OM_PERFDATA_OP(Inflations, inc());
  1470     if (log_is_enabled(Trace, monitorinflation)) {
  1467     if (log_is_enabled(Trace, monitorinflation)) {
  1471       ResourceMark rm(Self);
  1468       ResourceMark rm(self);
  1472       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
  1469       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
  1473                    INTPTR_FORMAT ", type='%s'", p2i(object),
  1470                    INTPTR_FORMAT ", type='%s'", p2i(object),
  1474                    object->mark().value(), object->klass()->external_name());
  1471                    object->mark().value(), object->klass()->external_name());
  1475     }
  1472     }
  1476     if (event.should_commit()) {
  1473     if (event.should_commit()) {
  1483 
  1480 
  1484 // We maintain a list of in-use monitors for each thread.
  1481 // We maintain a list of in-use monitors for each thread.
  1485 //
  1482 //
  1486 // deflate_thread_local_monitors() scans a single thread's in-use list, while
  1483 // deflate_thread_local_monitors() scans a single thread's in-use list, while
  1487 // deflate_idle_monitors() scans only a global list of in-use monitors which
  1484 // deflate_idle_monitors() scans only a global list of in-use monitors which
  1488 // is populated only as a thread dies (see omFlush()).
  1485 // is populated only as a thread dies (see om_flush()).
  1489 //
  1486 //
  1490 // These operations are called at all safepoints, immediately after mutators
  1487 // These operations are called at all safepoints, immediately after mutators
  1491 // are stopped, but before any objects have moved. Collectively they traverse
  1488 // are stopped, but before any objects have moved. Collectively they traverse
  1492 // the population of in-use monitors, deflating where possible. The scavenged
  1489 // the population of in-use monitors, deflating where possible. The scavenged
  1493 // monitors are returned to the global monitor free list.
  1490 // monitors are returned to the global monitor free list.
  1503 // This is an unfortunate aspect of this design.
  1500 // This is an unfortunate aspect of this design.
  1504 
  1501 
  1505 // Deflate a single monitor if not in-use
  1502 // Deflate a single monitor if not in-use
  1506 // Return true if deflated, false if in-use
  1503 // Return true if deflated, false if in-use
  1507 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1504 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
  1508                                          ObjectMonitor** freeHeadp,
  1505                                          ObjectMonitor** free_head_p,
  1509                                          ObjectMonitor** freeTailp) {
  1506                                          ObjectMonitor** free_tail_p) {
  1510   bool deflated;
  1507   bool deflated;
  1511   // Normal case ... The monitor is associated with obj.
  1508   // Normal case ... The monitor is associated with obj.
  1512   const markWord mark = obj->mark();
  1509   const markWord mark = obj->mark();
  1513   guarantee(mark == markWord::encode(mid), "should match: mark="
  1510   guarantee(mark == markWord::encode(mid), "should match: mark="
  1514             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
  1511             INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
  1538     mid->clear();
  1535     mid->clear();
  1539 
  1536 
  1540     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
  1537     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
  1541            p2i(mid->object()));
  1538            p2i(mid->object()));
  1542 
  1539 
  1543     // Move the object to the working free list defined by freeHeadp, freeTailp
  1540     // Move the deflated ObjectMonitor to the working free list
  1544     if (*freeHeadp == NULL) *freeHeadp = mid;
  1541     // defined by free_head_p and free_tail_p.
  1545     if (*freeTailp != NULL) {
  1542     if (*free_head_p == NULL) *free_head_p = mid;
  1546       ObjectMonitor * prevtail = *freeTailp;
  1543     if (*free_tail_p != NULL) {
  1547       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
  1544       // We append to the list so the caller can use mid->_next_om
  1548       prevtail->FreeNext = mid;
  1545       // to fix the linkages in its context.
  1549     }
  1546       ObjectMonitor* prevtail = *free_tail_p;
  1550     *freeTailp = mid;
  1547       // Should have been cleaned up by the caller:
       
  1548       assert(prevtail->_next_om == NULL, "cleaned up deflated?");
       
  1549       prevtail->_next_om = mid;
       
  1550     }
       
  1551     *free_tail_p = mid;
       
  1552     // At this point, mid->_next_om still refers to its current
       
  1553     // value and another ObjectMonitor's _next_om field still
       
  1554     // refers to this ObjectMonitor. Those linkages have to be
       
  1555     // cleaned up by the caller who has the complete context.
  1551     deflated = true;
  1556     deflated = true;
  1552   }
  1557   }
  1553   return deflated;
  1558   return deflated;
  1554 }
  1559 }
  1555 
  1560 
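
A minimal usage sketch of the out-parameters (the caller state shown is hypothetical; deflate_monitor_list() below is the real caller):

    ObjectMonitor* free_head_p = NULL;  // local SLL of scavenged monitors
    ObjectMonitor* free_tail_p = NULL;
    if (deflate_monitor(mid, obj, &free_head_p, &free_tail_p)) {
      // 'mid' is now the tail of the local free list, but the caller must
      // still unlink it from its in-use list and NULL out mid->_next_om,
      // since only the caller has the complete list context.
    }
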
  1564 // process the same monitor lists concurrently.
  1569 // process the same monitor lists concurrently.
  1565 //
  1570 //
  1566 // See also ParallelSPCleanupTask and
  1571 // See also ParallelSPCleanupTask and
  1567 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
  1572 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
  1568 // Threads::parallel_java_threads_do() in thread.cpp.
  1573 // Threads::parallel_java_threads_do() in thread.cpp.
  1569 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
  1574 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
  1570                                              ObjectMonitor** freeHeadp,
  1575                                              ObjectMonitor** free_head_p,
  1571                                              ObjectMonitor** freeTailp) {
  1576                                              ObjectMonitor** free_tail_p) {
  1572   ObjectMonitor* mid;
  1577   ObjectMonitor* mid;
  1573   ObjectMonitor* next;
  1578   ObjectMonitor* next;
  1574   ObjectMonitor* cur_mid_in_use = NULL;
  1579   ObjectMonitor* cur_mid_in_use = NULL;
  1575   int deflated_count = 0;
  1580   int deflated_count = 0;
  1576 
  1581 
  1577   for (mid = *listHeadp; mid != NULL;) {
  1582   for (mid = *list_p; mid != NULL;) {
  1578     oop obj = (oop) mid->object();
  1583     oop obj = (oop) mid->object();
  1579     if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
  1584     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
  1580       // if deflate_monitor succeeded,
  1585       // Deflation succeeded and already updated free_head_p and
  1581       // extract from per-thread in-use list
  1586       // free_tail_p as needed. Finish the move to the local free list
  1582       if (mid == *listHeadp) {
  1587       // by unlinking mid from the global or per-thread in-use list.
  1583         *listHeadp = mid->FreeNext;
  1588       if (mid == *list_p) {
       
  1589         *list_p = mid->_next_om;
  1584       } else if (cur_mid_in_use != NULL) {
  1590       } else if (cur_mid_in_use != NULL) {
  1585         cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
  1591         cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
  1586       }
  1592       }
  1587       next = mid->FreeNext;
  1593       next = mid->_next_om;
  1588       mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
  1594       mid->_next_om = NULL;  // This mid is current tail in the free_head_p list
  1589       mid = next;
  1595       mid = next;
  1590       deflated_count++;
  1596       deflated_count++;
  1591     } else {
  1597     } else {
  1592       cur_mid_in_use = mid;
  1598       cur_mid_in_use = mid;
  1593       mid = mid->FreeNext;
  1599       mid = mid->_next_om;
  1594     }
  1600     }
  1595   }
  1601   }
  1596   return deflated_count;
  1602   return deflated_count;
  1597 }
  1603 }
  1598 
  1604 
  1599 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1605 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1600   counters->nInuse = 0;              // currently associated with objects
  1606   counters->n_in_use = 0;              // currently associated with objects
  1601   counters->nInCirculation = 0;      // extant
  1607   counters->n_in_circulation = 0;      // extant
  1602   counters->nScavenged = 0;          // reclaimed (global and per-thread)
  1608   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  1603   counters->perThreadScavenged = 0;  // per-thread scavenge total
  1609   counters->per_thread_scavenged = 0;  // per-thread scavenge total
  1604   counters->perThreadTimes = 0.0;    // per-thread scavenge times
  1610   counters->per_thread_times = 0.0;    // per-thread scavenge times
  1605 }
  1611 }
  1606 
  1612 
  1607 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1613 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1608   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1614   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1609   bool deflated = false;
  1615   bool deflated = false;
  1610 
  1616 
  1611   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  1617   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  1612   ObjectMonitor * freeTailp = NULL;
  1618   ObjectMonitor* free_tail_p = NULL;
  1613   elapsedTimer timer;
  1619   elapsedTimer timer;
  1614 
  1620 
  1615   if (log_is_enabled(Info, monitorinflation)) {
  1621   if (log_is_enabled(Info, monitorinflation)) {
  1616     timer.start();
  1622     timer.start();
  1617   }
  1623   }
  1618 
  1624 
  1619   // Prevent omFlush from changing mids in Thread dtors during deflation
  1625   // Prevent om_flush from changing mids in Thread dtors during deflation
  1620   // And in case the vm thread is acquiring a lock during a safepoint
  1626   // And in case the vm thread is acquiring a lock during a safepoint
  1621   // See e.g. 6320749
  1627   // See e.g. 6320749
  1622   Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
  1628   Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
  1623 
  1629 
  1624   // Note: the thread-local monitors lists get deflated in
  1630   // Note: the thread-local monitors lists get deflated in
  1625   // a separate pass. See deflate_thread_local_monitors().
  1631   // a separate pass. See deflate_thread_local_monitors().
  1626 
  1632 
  1627   // For moribund threads, scan gOmInUseList
  1633   // For moribund threads, scan g_om_in_use_list
  1628   int deflated_count = 0;
  1634   int deflated_count = 0;
  1629   if (gOmInUseList) {
  1635   if (g_om_in_use_list) {
  1630     counters->nInCirculation += gOmInUseCount;
  1636     counters->n_in_circulation += g_om_in_use_count;
  1631     deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
  1637     deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
  1632     gOmInUseCount -= deflated_count;
  1638     g_om_in_use_count -= deflated_count;
  1633     counters->nScavenged += deflated_count;
  1639     counters->n_scavenged += deflated_count;
  1634     counters->nInuse += gOmInUseCount;
  1640     counters->n_in_use += g_om_in_use_count;
  1635   }
  1641   }
  1636 
  1642 
  1637   // Move the scavenged monitors back to the global free list.
  1643   if (free_head_p != NULL) {
  1638   if (freeHeadp != NULL) {
  1644     // Move the deflated ObjectMonitors back to the global free list.
  1639     guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
  1645     guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
  1640     assert(freeTailp->FreeNext == NULL, "invariant");
  1646     assert(free_tail_p->_next_om == NULL, "invariant");
  1641     // constant-time list splice - prepend scavenged segment to gFreeList
  1647     // constant-time list splice - prepend scavenged segment to g_free_list
  1642     freeTailp->FreeNext = gFreeList;
  1648     free_tail_p->_next_om = g_free_list;
  1643     gFreeList = freeHeadp;
  1649     g_free_list = free_head_p;
  1644   }
  1650   }
  1645   Thread::muxRelease(&gListLock);
  1651   Thread::muxRelease(&gListLock);
  1646   timer.stop();
  1652   timer.stop();
  1647 
  1653 
  1648   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1654   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1649   LogStreamHandle(Info, monitorinflation) lsh_info;
  1655   LogStreamHandle(Info, monitorinflation) lsh_info;
  1650   LogStream * ls = NULL;
  1656   LogStream* ls = NULL;
  1651   if (log_is_enabled(Debug, monitorinflation)) {
  1657   if (log_is_enabled(Debug, monitorinflation)) {
  1652     ls = &lsh_debug;
  1658     ls = &lsh_debug;
  1653   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1659   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1654     ls = &lsh_info;
  1660     ls = &lsh_info;
  1655   }
  1661   }
  1661 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1667 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  1662   // Report the cumulative time for deflating each thread's idle
  1668   // Report the cumulative time for deflating each thread's idle
  1663   // monitors. Note: if the work is split among more than one
  1669   // monitors. Note: if the work is split among more than one
  1664   // worker thread, then the reported time will likely be more
  1670   // worker thread, then the reported time will likely be more
  1665   // than a beginning to end measurement of the phase.
  1671   // than a beginning to end measurement of the phase.
  1666   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);
  1672   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
  1667 
  1673 
  1668   gMonitorFreeCount += counters->nScavenged;
  1674   g_om_free_count += counters->n_scavenged;
  1669 
  1675 
  1670   if (log_is_enabled(Debug, monitorinflation)) {
  1676   if (log_is_enabled(Debug, monitorinflation)) {
  1671     // exit_globals()'s call to audit_and_print_stats() is done
  1677     // exit_globals()'s call to audit_and_print_stats() is done
  1672     // at the Info level.
  1678     // at the Info level.
  1673     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  1679     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  1674   } else if (log_is_enabled(Info, monitorinflation)) {
  1680   } else if (log_is_enabled(Info, monitorinflation)) {
  1675     Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
  1681     Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
  1676     log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
  1682     log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
  1677                                "gMonitorFreeCount=%d", gMonitorPopulation,
  1683                                "g_om_free_count=%d", g_om_population,
  1678                                gOmInUseCount, gMonitorFreeCount);
  1684                                g_om_in_use_count, g_om_free_count);
  1679     Thread::muxRelease(&gListLock);
  1685     Thread::muxRelease(&gListLock);
  1680   }
  1686   }
  1681 
  1687 
  1682   ForceMonitorScavenge = 0;    // Reset
  1688   ForceMonitorScavenge = 0;    // Reset
  1683 
  1689 
  1684   OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  1690   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  1685   OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  1691   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
  1686 
  1692 
  1687   GVars.stwRandom = os::random();
  1693   GVars.stw_random = os::random();
  1688   GVars.stwCycle++;
  1694   GVars.stw_cycle++;
  1689 }
  1695 }
  1690 
  1696 
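
Taken together, a safepoint cleanup pass drives these functions roughly as follows (hedged sketch; see SafepointSynchronize::do_cleanup_tasks(), which may split the per-thread loop across worker threads):

    DeflateMonitorCounters counters;
    ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
    ObjectSynchronizer::deflate_idle_monitors(&counters);  // global in-use list
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread* jt = jtiwh.next(); ) {
      ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
    }
    ObjectSynchronizer::finish_deflate_idle_monitors(&counters);
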
  1691 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  1697 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  1692   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1698   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  1693 
  1699 
  1694   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  1700   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  1695   ObjectMonitor * freeTailp = NULL;
  1701   ObjectMonitor* free_tail_p = NULL;
  1696   elapsedTimer timer;
  1702   elapsedTimer timer;
  1697 
  1703 
  1698   if (log_is_enabled(Info, safepoint, cleanup) ||
  1704   if (log_is_enabled(Info, safepoint, cleanup) ||
  1699       log_is_enabled(Info, monitorinflation)) {
  1705       log_is_enabled(Info, monitorinflation)) {
  1700     timer.start();
  1706     timer.start();
  1701   }
  1707   }
  1702 
  1708 
  1703   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
  1709   int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);
  1704 
  1710 
  1705   Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
  1711   Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
  1706 
  1712 
  1707   // Adjust counters
  1713   // Adjust counters
  1708   counters->nInCirculation += thread->omInUseCount;
  1714   counters->n_in_circulation += thread->om_in_use_count;
  1709   thread->omInUseCount -= deflated_count;
  1715   thread->om_in_use_count -= deflated_count;
  1710   counters->nScavenged += deflated_count;
  1716   counters->n_scavenged += deflated_count;
  1711   counters->nInuse += thread->omInUseCount;
  1717   counters->n_in_use += thread->om_in_use_count;
  1712   counters->perThreadScavenged += deflated_count;
  1718   counters->per_thread_scavenged += deflated_count;
  1713 
  1719 
  1714   // Move the scavenged monitors back to the global free list.
  1720   if (free_head_p != NULL) {
  1715   if (freeHeadp != NULL) {
  1721     // Move the deflated ObjectMonitors back to the global free list.
  1716     guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
  1722     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
  1717     assert(freeTailp->FreeNext == NULL, "invariant");
  1723     assert(free_tail_p->_next_om == NULL, "invariant");
  1718 
  1724 
  1719     // constant-time list splice - prepend scavenged segment to gFreeList
  1725     // constant-time list splice - prepend scavenged segment to g_free_list
  1720     freeTailp->FreeNext = gFreeList;
  1726     free_tail_p->_next_om = g_free_list;
  1721     gFreeList = freeHeadp;
  1727     g_free_list = free_head_p;
  1722   }
  1728   }
  1723 
  1729 
  1724   timer.stop();
  1730   timer.stop();
  1725   // Safepoint logging cares about cumulative perThreadTimes and
  1731   // Safepoint logging cares about cumulative per_thread_times and
  1726   // we'll capture most of the cost, but not the muxRelease() which
  1732   // we'll capture most of the cost, but not the muxRelease() which
  1727   // should be cheap.
  1733   // should be cheap.
  1728   counters->perThreadTimes += timer.seconds();
  1734   counters->per_thread_times += timer.seconds();
  1729 
  1735 
  1730   Thread::muxRelease(&gListLock);
  1736   Thread::muxRelease(&gListLock);
  1731 
  1737 
  1732   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1738   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1733   LogStreamHandle(Info, monitorinflation) lsh_info;
  1739   LogStreamHandle(Info, monitorinflation) lsh_info;
  1734   LogStream * ls = NULL;
  1740   LogStream* ls = NULL;
  1735   if (log_is_enabled(Debug, monitorinflation)) {
  1741   if (log_is_enabled(Debug, monitorinflation)) {
  1736     ls = &lsh_debug;
  1742     ls = &lsh_debug;
  1737   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1743   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
  1738     ls = &lsh_info;
  1744     ls = &lsh_info;
  1739   }
  1745   }
  1805 
  1811 
  1806 u_char* ObjectSynchronizer::get_gvars_addr() {
  1812 u_char* ObjectSynchronizer::get_gvars_addr() {
  1807   return (u_char*)&GVars;
  1813   return (u_char*)&GVars;
  1808 }
  1814 }
  1809 
  1815 
  1810 u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  1816 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  1811   return (u_char*)&GVars.hcSequence;
  1817   return (u_char*)&GVars.hc_sequence;
  1812 }
  1818 }
  1813 
  1819 
  1814 size_t ObjectSynchronizer::get_gvars_size() {
  1820 size_t ObjectSynchronizer::get_gvars_size() {
  1815   return sizeof(SharedGlobals);
  1821   return sizeof(SharedGlobals);
  1816 }
  1822 }
  1817 
  1823 
  1818 u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  1824 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  1819   return (u_char*)&GVars.stwRandom;
  1825   return (u_char*)&GVars.stw_random;
  1820 }
  1826 }
  1821 
  1827 
  1822 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  1828 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  1823   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
  1829   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
  1824 
  1830 
  1825   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1831   LogStreamHandle(Debug, monitorinflation) lsh_debug;
  1826   LogStreamHandle(Info, monitorinflation) lsh_info;
  1832   LogStreamHandle(Info, monitorinflation) lsh_info;
  1827   LogStreamHandle(Trace, monitorinflation) lsh_trace;
  1833   LogStreamHandle(Trace, monitorinflation) lsh_trace;
  1828   LogStream * ls = NULL;
  1834   LogStream* ls = NULL;
  1829   if (log_is_enabled(Trace, monitorinflation)) {
  1835   if (log_is_enabled(Trace, monitorinflation)) {
  1830     ls = &lsh_trace;
  1836     ls = &lsh_trace;
  1831   } else if (log_is_enabled(Debug, monitorinflation)) {
  1837   } else if (log_is_enabled(Debug, monitorinflation)) {
  1832     ls = &lsh_debug;
  1838     ls = &lsh_debug;
  1833   } else if (log_is_enabled(Info, monitorinflation)) {
  1839   } else if (log_is_enabled(Info, monitorinflation)) {
  1839     // Not at VM exit so grab the global list lock.
  1845     // Not at VM exit so grab the global list lock.
  1840     Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  1846     Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  1841   }
  1847   }
  1842 
  1848 
  1843   // Log counts for the global and per-thread monitor lists:
  1849   // Log counts for the global and per-thread monitor lists:
  1844   int chkMonitorPopulation = log_monitor_list_counts(ls);
  1850   int chk_om_population = log_monitor_list_counts(ls);
  1845   int error_cnt = 0;
  1851   int error_cnt = 0;
  1846 
  1852 
  1847   ls->print_cr("Checking global lists:");
  1853   ls->print_cr("Checking global lists:");
  1848 
  1854 
  1849   // Check gMonitorPopulation:
  1855   // Check g_om_population:
  1850   if (gMonitorPopulation == chkMonitorPopulation) {
  1856   if (g_om_population == chk_om_population) {
  1851     ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
  1857     ls->print_cr("g_om_population=%d equals chk_om_population=%d",
  1852                  gMonitorPopulation, chkMonitorPopulation);
  1858                  g_om_population, chk_om_population);
  1853   } else {
  1859   } else {
  1854     ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
  1860     ls->print_cr("ERROR: g_om_population=%d is not equal to "
  1855                  "chkMonitorPopulation=%d", gMonitorPopulation,
  1861                  "chk_om_population=%d", g_om_population,
  1856                  chkMonitorPopulation);
  1862                  chk_om_population);
  1857     error_cnt++;
  1863     error_cnt++;
  1858   }
  1864   }
  1859 
  1865 
  1860   // Check gOmInUseList and gOmInUseCount:
  1866   // Check g_om_in_use_list and g_om_in_use_count:
  1861   chk_global_in_use_list_and_count(ls, &error_cnt);
  1867   chk_global_in_use_list_and_count(ls, &error_cnt);
  1862 
  1868 
  1863   // Check gFreeList and gMonitorFreeCount:
  1869   // Check g_free_list and g_om_free_count:
  1864   chk_global_free_list_and_count(ls, &error_cnt);
  1870   chk_global_free_list_and_count(ls, &error_cnt);
  1865 
  1871 
  1866   if (!on_exit) {
  1872   if (!on_exit) {
  1867     Thread::muxRelease(&gListLock);
  1873     Thread::muxRelease(&gListLock);
  1868   }
  1874   }
  1869 
  1875 
  1870   ls->print_cr("Checking per-thread lists:");
  1876   ls->print_cr("Checking per-thread lists:");
  1871 
  1877 
  1872   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  1878   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  1873     // Check omInUseList and omInUseCount:
  1879     // Check om_in_use_list and om_in_use_count:
  1874     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
  1880     chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
  1875 
  1881 
  1876     // Check omFreeList and omFreeCount:
  1882     // Check om_free_list and om_free_count:
  1877     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  1883     chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  1878   }
  1884   }
  1879 
  1885 
  1880   if (error_cnt == 0) {
  1886   if (error_cnt == 0) {
  1881     ls->print_cr("No errors found in monitor list checks.");
  1887     ls->print_cr("No errors found in monitor list checks.");
  1895 
  1901 
  1896   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
  1902   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
  1897 }
  1903 }
  1898 
  1904 
  1899 // Check a free monitor entry; log any errors.
  1905 // Check a free monitor entry; log any errors.
  1900 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
  1906 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
  1901                                         outputStream * out, int *error_cnt_p) {
  1907                                         outputStream * out, int *error_cnt_p) {
  1902   stringStream ss;
  1908   stringStream ss;
  1903   if (n->is_busy()) {
  1909   if (n->is_busy()) {
  1904     if (jt != NULL) {
  1910     if (jt != NULL) {
  1905       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1911       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1940 }
  1946 }
  1941 
  1947 
  1942 // Check the global free list and count; log the results of the checks.
  1948 // Check the global free list and count; log the results of the checks.
  1943 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
  1949 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
  1944                                                         int *error_cnt_p) {
  1950                                                         int *error_cnt_p) {
  1945   int chkMonitorFreeCount = 0;
  1951   int chk_om_free_count = 0;
  1946   for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) {
  1952   for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
  1947     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
  1953     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
  1948     chkMonitorFreeCount++;
  1954     chk_om_free_count++;
  1949   }
  1955   }
  1950   if (gMonitorFreeCount == chkMonitorFreeCount) {
  1956   if (g_om_free_count == chk_om_free_count) {
  1951     out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
  1957     out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
  1952                   gMonitorFreeCount, chkMonitorFreeCount);
  1958                   g_om_free_count, chk_om_free_count);
  1953   } else {
  1959   } else {
  1954     out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
  1960     out->print_cr("ERROR: g_om_free_count=%d is not equal to "
  1955                   "chkMonitorFreeCount=%d", gMonitorFreeCount,
  1961                   "chk_om_free_count=%d", g_om_free_count,
  1956                   chkMonitorFreeCount);
  1962                   chk_om_free_count);
  1957     *error_cnt_p = *error_cnt_p + 1;
  1963     *error_cnt_p = *error_cnt_p + 1;
  1958   }
  1964   }
  1959 }
  1965 }
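All four list-and-count checkers share the same core pattern: walk the singly-linked list, count the nodes, and compare the walked total against the count the runtime maintains incrementally. A minimal sketch of that cross-check, with Monitor and audit_list as hypothetical simplifications of ObjectMonitor/_next_om and the chk_*_list_and_count() functions:

#include <cstdio>

struct Monitor {
  Monitor* next;   // models ObjectMonitor::_next_om
};

// Walk the list and cross-check its length against the cached count;
// log and tally a mismatch instead of failing immediately.
static void audit_list(const char* name, Monitor* head, int cached_count,
                       int* error_cnt_p) {
  int walked = 0;
  for (Monitor* m = head; m != nullptr; m = m->next) {
    walked++;
  }
  if (walked == cached_count) {
    printf("%s: count=%d equals walked=%d\n", name, cached_count, walked);
  } else {
    printf("ERROR: %s: count=%d is not equal to walked=%d\n",
           name, cached_count, walked);
    (*error_cnt_p)++;
  }
}

int main() {
  Monitor c = {nullptr}, b = {&c}, a = {&b};
  int error_cnt = 0;
  audit_list("free list", &a, 3, &error_cnt);    // counts agree
  audit_list("in-use list", &a, 2, &error_cnt);  // deliberately stale count
  printf("error_cnt=%d\n", error_cnt);           // prints 1
  return 0;
}

Deferring the failure to a single guarantee at the end lets one audit pass report every broken list rather than stopping at the first.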
  1960 
  1966 
  1961 // Check the global in-use list and count; log the results of the checks.
  1967 // Check the global in-use list and count; log the results of the checks.
  1962 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
  1968 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
  1963                                                           int *error_cnt_p) {
  1969                                                           int *error_cnt_p) {
  1964   int chkOmInUseCount = 0;
  1970   int chk_om_in_use_count = 0;
  1965   for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
  1971   for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
  1966     chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
  1972     chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
  1967     chkOmInUseCount++;
  1973     chk_om_in_use_count++;
  1968   }
  1974   }
  1969   if (gOmInUseCount == chkOmInUseCount) {
  1975   if (g_om_in_use_count == chk_om_in_use_count) {
  1970     out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
  1976     out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
  1971                   chkOmInUseCount);
  1977                   chk_om_in_use_count);
  1972   } else {
  1978   } else {
  1973     out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
  1979     out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
  1974                   gOmInUseCount, chkOmInUseCount);
  1980                   g_om_in_use_count, chk_om_in_use_count);
  1975     *error_cnt_p = *error_cnt_p + 1;
  1981     *error_cnt_p = *error_cnt_p + 1;
  1976   }
  1982   }
  1977 }
  1983 }
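Beyond the count, each in-use entry is checked by chk_in_use_entry() for a two-way association: the monitor must have a non-zero header, and its object's mark word must refer back to that same monitor. A minimal sketch of that bidirectional invariant; MiniMonitor and MiniObject are hypothetical simplifications of ObjectMonitor and oop/markWord, not the real types.

#include <cstdio>

struct MiniMonitor;

struct MiniObject {
  MiniMonitor* mark_monitor;  // models mark.monitor() for an inflated mark
};

struct MiniMonitor {
  long header;                // models _header; must be non-zero when in use
  MiniObject* object;         // models _object
};

static void check_in_use(MiniMonitor* n, int* error_cnt_p) {
  if (n->header == 0) {
    printf("ERROR: in-use monitor must have non-NULL _header\n");
    (*error_cnt_p)++;
  }
  if (n->object != nullptr && n->object->mark_monitor != n) {
    printf("ERROR: object's mark does not refer back to this monitor\n");
    (*error_cnt_p)++;
  }
}

int main() {
  MiniObject obj = {nullptr};
  MiniMonitor mon = {0x1, &obj};
  obj.mark_monitor = &mon;     // healthy: the links agree in both directions
  int error_cnt = 0;
  check_in_use(&mon, &error_cnt);
  obj.mark_monitor = nullptr;  // break the back-link to show a failure
  check_in_use(&mon, &error_cnt);
  printf("error_cnt=%d\n", error_cnt);  // prints 1
  return 0;
}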
  1978 
  1984 
  1979 // Check an in-use monitor entry; log any errors.
  1985 // Check an in-use monitor entry; log any errors.
  1980 void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
  1986 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
  1981                                           outputStream * out, int *error_cnt_p) {
  1987                                           outputStream * out, int *error_cnt_p) {
  1982   if (n->header().value() == 0) {
  1988   if (n->header().value() == 0) {
  1983     if (jt != NULL) {
  1989     if (jt != NULL) {
  1984       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1990       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  1985                     ": in-use per-thread monitor must have non-NULL _header "
  1991                     ": in-use per-thread monitor must have non-NULL _header "
  2015                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
  2021                     INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
  2016                     p2i(obj), mark.value());
  2022                     p2i(obj), mark.value());
  2017     }
  2023     }
  2018     *error_cnt_p = *error_cnt_p + 1;
  2024     *error_cnt_p = *error_cnt_p + 1;
  2019   }
  2025   }
  2020   ObjectMonitor * const obj_mon = mark.monitor();
  2026   ObjectMonitor* const obj_mon = mark.monitor();
  2021   if (n != obj_mon) {
  2027   if (n != obj_mon) {
  2022     if (jt != NULL) {
  2028     if (jt != NULL) {
  2023       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2029       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
  2024                     ": in-use per-thread monitor's object does not refer "
  2030                     ": in-use per-thread monitor's object does not refer "
  2025                     "to the same monitor: obj=" INTPTR_FORMAT ", mark="
  2031                     "to the same monitor: obj=" INTPTR_FORMAT ", mark="
  2037 
  2043 
  2038 // Check the thread's free list and count; log the results of the checks.
  2044 // Check the thread's free list and count; log the results of the checks.
  2039 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
  2045 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
  2040                                                             outputStream * out,
  2046                                                             outputStream * out,
  2041                                                             int *error_cnt_p) {
  2047                                                             int *error_cnt_p) {
  2042   int chkOmFreeCount = 0;
  2048   int chk_om_free_count = 0;
  2043   for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) {
  2049   for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
  2044     chk_free_entry(jt, n, out, error_cnt_p);
  2050     chk_free_entry(jt, n, out, error_cnt_p);
  2045     chkOmFreeCount++;
  2051     chk_om_free_count++;
  2046   }
  2052   }
  2047   if (jt->omFreeCount == chkOmFreeCount) {
  2053   if (jt->om_free_count == chk_om_free_count) {
  2048     out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
  2054     out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
  2049                   "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  2055                   "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
  2050   } else {
  2056   } else {
  2051     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
  2057     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
  2052                   "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
  2058                   "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
  2053                   chkOmFreeCount);
  2059                   chk_om_free_count);
  2054     *error_cnt_p = *error_cnt_p + 1;
  2060     *error_cnt_p = *error_cnt_p + 1;
  2055   }
  2061   }
  2056 }
  2062 }
  2057 
  2063 
  2058 // Check the thread's in-use list and count; log the results of the checks.
  2064 // Check the thread's in-use list and count; log the results of the checks.
  2059 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
  2065 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
  2060                                                               outputStream * out,
  2066                                                               outputStream * out,
  2061                                                               int *error_cnt_p) {
  2067                                                               int *error_cnt_p) {
  2062   int chkOmInUseCount = 0;
  2068   int chk_om_in_use_count = 0;
  2063   for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
  2069   for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
  2064     chk_in_use_entry(jt, n, out, error_cnt_p);
  2070     chk_in_use_entry(jt, n, out, error_cnt_p);
  2065     chkOmInUseCount++;
  2071     chk_om_in_use_count++;
  2066   }
  2072   }
  2067   if (jt->omInUseCount == chkOmInUseCount) {
  2073   if (jt->om_in_use_count == chk_om_in_use_count) {
  2068     out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
  2074     out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
  2069                   "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
  2075                   "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
  2070                   chkOmInUseCount);
  2076                   chk_om_in_use_count);
  2071   } else {
  2077   } else {
  2072     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
  2078     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
  2073                   "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
  2079                   "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
  2074                   chkOmInUseCount);
  2080                   chk_om_in_use_count);
  2075     *error_cnt_p = *error_cnt_p + 1;
  2081     *error_cnt_p = *error_cnt_p + 1;
  2076   }
  2082   }
  2077 }
  2083 }
  2078 
  2084 
  2079 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  2085 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  2085     // Not at VM exit so grab the global list lock.
  2091     // Not at VM exit so grab the global list lock.
  2086     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  2092     Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  2087   }
  2093   }
  2088 
  2094 
  2089   stringStream ss;
  2095   stringStream ss;
  2090   if (gOmInUseCount > 0) {
  2096   if (g_om_in_use_count > 0) {
  2091     out->print_cr("In-use global monitor info:");
  2097     out->print_cr("In-use global monitor info:");
  2092     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2098     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2093     out->print_cr("%18s  %s  %18s  %18s",
  2099     out->print_cr("%18s  %s  %18s  %18s",
  2094                   "monitor", "BHL", "object", "object type");
  2100                   "monitor", "BHL", "object", "object type");
  2095     out->print_cr("==================  ===  ==================  ==================");
  2101     out->print_cr("==================  ===  ==================  ==================");
  2096     for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
  2102     for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
  2097       const oop obj = (oop) n->object();
  2103       const oop obj = (oop) n->object();
  2098       const markWord mark = n->header();
  2104       const markWord mark = n->header();
  2099       ResourceMark rm;
  2105       ResourceMark rm;
  2100       out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
  2106       out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
  2101                  n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
  2107                  n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
  2116   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2122   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  2117   out->print_cr("%18s  %18s  %s  %18s  %18s",
  2123   out->print_cr("%18s  %18s  %s  %18s  %18s",
  2118                 "jt", "monitor", "BHL", "object", "object type");
  2124                 "jt", "monitor", "BHL", "object", "object type");
  2119   out->print_cr("==================  ==================  ===  ==================  ==================");
  2125   out->print_cr("==================  ==================  ===  ==================  ==================");
  2120   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2126   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2121     for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
  2127     for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
  2122       const oop obj = (oop) n->object();
  2128       const oop obj = (oop) n->object();
  2123       const markWord mark = n->header();
  2129       const markWord mark = n->header();
  2124       ResourceMark rm;
  2130       ResourceMark rm;
  2125       out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
  2131       out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
  2126                  "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
  2132                  "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
  2138 }
  2144 }
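The 'BHL' column in the tables above packs three booleans into three 0/1 digits via the "%d%d%d" format. A small sketch of that encoding; the Mon struct and its fields are hypothetical stand-ins for ObjectMonitor's is_busy(), header().hash(), and owner().

#include <cstdio>
#include <cstdint>

struct Mon {
  int busy;        // B: models is_busy() != 0
  uintptr_t hash;  // H: models mark.hash() != 0
  void* owner;     // L: models owner() != NULL
};

int main() {
  Mon locked = {1, 0x2a, &locked};
  Mon idle   = {0, 0, nullptr};
  // Mirrors the "%d%d%d" column printed by log_in_use_monitor_details().
  printf("BHL\n%d%d%d\n%d%d%d\n",
         locked.busy != 0, locked.hash != 0, locked.owner != nullptr,
         idle.busy != 0, idle.hash != 0, idle.owner != nullptr);
  return 0;
}

Reading a row is then immediate: "111" is a busy, hashed, owned monitor, while "000" is quiescent.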
  2139 
  2145 
  2140 // Log counts for the global and per-thread monitor lists and return
  2146 // Log counts for the global and per-thread monitor lists and return
  2141 // the population count.
  2147 // the population count.
  2142 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  2148 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  2143   int popCount = 0;
  2149   int pop_count = 0;
  2144   out->print_cr("%18s  %10s  %10s  %10s",
  2150   out->print_cr("%18s  %10s  %10s  %10s",
  2145                 "Global Lists:", "InUse", "Free", "Total");
  2151                 "Global Lists:", "InUse", "Free", "Total");
  2146   out->print_cr("==================  ==========  ==========  ==========");
  2152   out->print_cr("==================  ==========  ==========  ==========");
  2147   out->print_cr("%18s  %10d  %10d  %10d", "",
  2153   out->print_cr("%18s  %10d  %10d  %10d", "",
  2148                 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  2154                 g_om_in_use_count, g_om_free_count, g_om_population);
  2149   popCount += gOmInUseCount + gMonitorFreeCount;
  2155   pop_count += g_om_in_use_count + g_om_free_count;
  2150 
  2156 
  2151   out->print_cr("%18s  %10s  %10s  %10s",
  2157   out->print_cr("%18s  %10s  %10s  %10s",
  2152                 "Per-Thread Lists:", "InUse", "Free", "Provision");
  2158                 "Per-Thread Lists:", "InUse", "Free", "Provision");
  2153   out->print_cr("==================  ==========  ==========  ==========");
  2159   out->print_cr("==================  ==========  ==========  ==========");
  2154 
  2160 
  2155   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2161   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  2156     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
  2162     out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
  2157                   jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
  2163                   jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
  2158     popCount += jt->omInUseCount + jt->omFreeCount;
  2164     pop_count += jt->om_in_use_count + jt->om_free_count;
  2159   }
  2165   }
  2160   return popCount;
  2166   return pop_count;
  2161 }
  2167 }
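The returned pop_count is the sum of every in-use and free count, global plus per-thread; the caller can compare it against g_om_population, which tracks how many ObjectMonitors exist in circulation. A minimal sketch of that accounting with made-up numbers; ThreadCounts and the values are illustrations, not real JVM state.

#include <cstdio>

struct ThreadCounts {
  int in_use;  // models jt->om_in_use_count
  int free;    // models jt->om_free_count
};

int main() {
  int g_in_use = 5, g_free = 11;           // models the global counters
  ThreadCounts threads[] = {{2, 3}, {0, 7}};

  int pop_count = g_in_use + g_free;       // global lists first
  for (const ThreadCounts& t : threads) {
    pop_count += t.in_use + t.free;        // then each thread's lists
  }
  // In the real audit this total is checked against g_om_population.
  printf("pop_count=%d\n", pop_count);     // 5+11+2+3+0+7 = 28
  return 0;
}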
  2162 
  2168 
  2163 #ifndef PRODUCT
  2169 #ifndef PRODUCT
  2164 
  2170 
  2165   // Check if the monitor belongs to the monitor cache.
  2171   // Check if the monitor belongs to the monitor cache.
  2166   // The list is grow-only, so it's *relatively* safe to traverse
  2172   // The list is grow-only, so it's *relatively* safe to traverse
  2167 // the list of extant blocks without taking a lock.
  2173 // the list of extant blocks without taking a lock.
  2168 
  2174 
  2169 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  2175 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  2170   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  2176   PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
  2171   while (block != NULL) {
  2177   while (block != NULL) {
  2172     assert(block->object() == CHAINMARKER, "must be a block header");
  2178     assert(block->object() == CHAINMARKER, "must be a block header");
  2173     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
  2179     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
  2174       address mon = (address)monitor;
  2180       address mon = (address)monitor;
  2175       address blk = (address)block;
  2181       address blk = (address)block;
  2176       size_t diff = mon - blk;
  2182       size_t diff = mon - blk;
  2177       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
  2183       assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
  2178       return 1;
  2184       return 1;
  2179     }
  2185     }
  2180     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  2186     block = (PaddedObjectMonitor*)block->_next_om;
  2181   }
  2187   }
  2182   return 0;
  2188   return 0;
  2183 }
  2189 }
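The membership test above relies on two pieces of pointer arithmetic: a monitor belongs to a block if it lies strictly between slot 0 (the block header, which holds CHAINMARKER and is never a real monitor, hence the strict '>') and one past the last slot, and its byte offset from the block base must be a whole number of padded slots. A minimal sketch of that check; Slot and BLOCKSIZE are hypothetical stand-ins for PaddedObjectMonitor and _BLOCKSIZE.

#include <cassert>
#include <cstddef>
#include <cstdio>

struct Slot { char pad[128]; };   // models one padded monitor slot
static const int BLOCKSIZE = 8;   // models _BLOCKSIZE

static bool is_in_block(Slot* block, Slot* monitor) {
  if (monitor > &block[0] && monitor < &block[BLOCKSIZE]) {
    size_t diff = (char*)monitor - (char*)block;
    assert(diff % sizeof(Slot) == 0 && "must be slot-aligned");
    return true;
  }
  return false;
}

int main() {
  Slot block[BLOCKSIZE];
  printf("%d\n", is_in_block(block, &block[3]));           // 1: interior slot
  printf("%d\n", is_in_block(block, &block[0]));           // 0: header excluded
  printf("%d\n", is_in_block(block, &block[0] + BLOCKSIZE)); // 0: one past end
  return 0;
}

The grow-only property of g_block_list is what makes walking it without gListLock tolerable here: blocks are only ever prepended, so a racing traversal sees a consistent suffix of the list.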
  2184 
  2190 
  2185 #endif
  2191 #endif