src/hotspot/share/runtime/synchronizer.cpp
changeset 53557 4cfe0e5a3b79
parent 52703 e7fdc9d9c376
child 53588 a5f46c4690f8

@@ -1 +1 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -324 +324 @@
       return;
     }
   }
 
   // We have to take the slow-path of possible inflation and then exit.
-  ObjectSynchronizer::inflate(THREAD,
-                              object,
-                              inflate_cause_vm_internal)->exit(true, THREAD);
+  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
 }
 
 // -----------------------------------------------------------------------------
 // Interpreter/Compiler Slow Case
 // This routine is used to handle interpreter/compiler slow case
@@ -359 +357 @@
   // The object header will never be displaced to this lock,
   // so it does not matter what the value is, except that it
   // must be non-zero to avoid looking like a re-entrant lock,
   // and must not look locked either.
   lock->set_displaced_header(markOopDesc::unused_mark());
-  ObjectSynchronizer::inflate(THREAD,
-                              obj(),
-                              inflate_cause_monitor_enter)->enter(THREAD);
+  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
 }
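
The hedged sketch below (a simplified model, not HotSpot code; all names are invented for illustration) shows why the displaced header above must be non-zero: the exit path treats a zero displaced header as a recursive stack-lock whose release is a no-op, so unused_mark() has to avoid that encoding while also not matching a real lock pattern.

#include <cassert>
#include <cstdint>

// Assumption for the model: any non-zero value that is not a valid lock
// pattern can stand in for markOopDesc::unused_mark().
const uintptr_t UNUSED_MARK = 0x3;

struct BasicLockModel {
  uintptr_t displaced_header;  // 0 => recursive enter; non-zero => saved mark
};

bool exit_is_noop(const BasicLockModel& lock) {
  // A zero displaced header marks a re-entrant acquisition; its exit does
  // nothing. A lock headed for inflation must not be mistaken for that case.
  return lock.displaced_header == 0;
}

int main() {
  BasicLockModel about_to_inflate{UNUSED_MARK};
  assert(!exit_is_noop(about_to_inflate));
  return 0;
}
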
@@ -368 +364 @@
 
 // This routine is used to handle interpreter/compiler slow case
 // We don't need to use fast path here, because it must have
 // failed in the interpreter/compiler code. Simply use the heavy
@@ -390 +386 @@
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
 
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
-                                                       obj(),
-                                                       inflate_cause_vm_internal);
+  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
 
   return monitor->complete_exit(THREAD);
 }
 
 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
@@ -404 +398 @@
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
 
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
-                                                       obj(),
-                                                       inflate_cause_vm_internal);
+  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
 
   monitor->reenter(recursion, THREAD);
 }
 // -----------------------------------------------------------------------------
 // JNI locks on java objects
@@ -420 +412 @@
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   THREAD->set_current_pending_monitor_is_from_java(false);
-  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
+  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
   THREAD->set_current_pending_monitor_is_from_java(true);
 }
 
 // NOTE: must use heavy weight monitor to handle jni monitor exit
 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
@@ -433 +425 @@
     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
     obj = h_obj();
   }
   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
-                                                       obj,
-                                                       inflate_cause_jni_exit);
+  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
   // If this thread has locked the object, exit the monitor.  Note:  can't use
   // monitor->check(CHECK); must exit even if an exception is pending.
   if (monitor->check(THREAD)) {
     monitor->exit(true, THREAD);
   }
@@ -475 +465 @@
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
-                                                       obj(),
-                                                       inflate_cause_wait);
+  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
 
   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
   monitor->wait(millis, true, THREAD);
 
   // This dummy call is in place to get around dtrace bug 6254741.  Once
@@ -497 +485 @@
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
-  ObjectSynchronizer::inflate(THREAD,
-                              obj(),
-                              inflate_cause_wait)->wait(millis, false, THREAD);
+  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
 }
 
 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
@@ -512 +498 @@
 
   markOop mark = obj->mark();
   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
     return;
   }
-  ObjectSynchronizer::inflate(THREAD,
-                              obj(),
-                              inflate_cause_notify)->notify(THREAD);
+  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
 }
 
 // NOTE: see comment of notify()
 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
   if (UseBiasedLocking) {
@@ -528 +512 @@
 
   markOop mark = obj->mark();
   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
     return;
   }
-  ObjectSynchronizer::inflate(THREAD,
-                              obj(),
-                              inflate_cause_notify)->notifyAll(THREAD);
+  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
 }
 
 // -----------------------------------------------------------------------------
 // Hash Code handling
 //
@@ -734 +716 @@
   // object should remain ineligible for biased locking
   assert(!mark->has_bias_pattern(), "invariant");
 
   if (mark->is_neutral()) {
     hash = mark->hash();              // this is a normal header
-    if (hash) {                       // if it has hash, just return it
+    if (hash != 0) {                  // if it has hash, just return it
       return hash;
     }
     hash = get_next_hash(Self, obj);  // allocate a new hash code
     temp = mark->copy_set_hash(hash); // merge the hash code into header
     // use (machine word version) atomic operation to install the hash
@@ -750 +732 @@
     // into heavy weight monitor. We could add more code here
     // for fast path, but it does not worth the complexity.
   } else if (mark->has_monitor()) {
     monitor = mark->monitor();
     temp = monitor->header();
-    assert(temp->is_neutral(), "invariant");
+    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
     hash = temp->hash();
-    if (hash) {
+    if (hash != 0) {
       return hash;
     }
     // Skip to the following code to reduce code size
   } else if (Self->is_lock_owned((address)mark->locker())) {
     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
-    assert(temp->is_neutral(), "invariant");
+    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
     hash = temp->hash();              // by current thread, check if the displaced
-    if (hash) {                       // header contains hash code
+    if (hash != 0) {                  // header contains hash code
       return hash;
     }
     // WARNING:
     //   The displaced header is strictly immutable.
     // It can NOT be changed in ANY cases. So we have
@@ -775 +757 @@
     // Any change to stack may not propagate to other threads
     // correctly.
   }
 
   // Inflate the monitor to set hash code
-  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
+  monitor = inflate(Self, obj, inflate_cause_hash_code);
   // Load displaced header and check it has hash code
   mark = monitor->header();
-  assert(mark->is_neutral(), "invariant");
+  assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)mark));
   hash = mark->hash();
   if (hash == 0) {
     hash = get_next_hash(Self, obj);
     temp = mark->copy_set_hash(hash); // merge hash code into header
-    assert(temp->is_neutral(), "invariant");
+    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
     test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
     if (test != mark) {
       // The only update to the header in the monitor (outside GC)
       // is install the hash code. If someone add new usage of
       // displaced header, please update this code
       hash = test->hash();
-      assert(test->is_neutral(), "invariant");
+      assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)test));
       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
     }
   }
   // We finally get the hash
   return hash;
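
As a worked illustration of the Atomic::cmpxchg step above, here is a self-contained sketch (a simplified model, not HotSpot code) of racing threads installing a hash into a shared header word: the first CAS wins and every later caller returns the winner's value.

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint32_t> header_hash{0};  // 0 means "no hash installed yet"

uint32_t fast_hash_code(uint32_t candidate) {
  uint32_t observed = header_hash.load();
  if (observed != 0) {
    return observed;  // another thread already installed a hash
  }
  // Try to install; on failure, 'observed' is reloaded with the winner's value.
  if (header_hash.compare_exchange_strong(observed, candidate)) {
    return candidate;
  }
  return observed;
}

int main() {
  printf("%u\n", fast_hash_code(42));  // prints 42
  printf("%u\n", fast_hash_code(7));   // prints 42 again: first install wins
  return 0;
}
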
@@ -862 +844 @@
   if (mark->has_locker()) {
     return self->is_lock_owned((address)mark->locker()) ?
       owner_self : owner_other;
   }
 
-  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
+  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
   // The Object:ObjectMonitor relationship is stable as long as we're
   // not at a safepoint.
   if (mark->has_monitor()) {
     void * owner = mark->monitor()->_owner;
     if (owner == NULL) return owner_none;
@@ -899 +881 @@
   if (mark->has_locker()) {
     owner = (address) mark->locker();
   }
 
   // Contended case, header points to ObjectMonitor (tagged pointer)
-  if (mark->has_monitor()) {
+  else if (mark->has_monitor()) {
     ObjectMonitor* monitor = mark->monitor();
     assert(monitor != NULL, "monitor should be non-null");
     owner = (address) monitor->owner();
   }
 
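
The ownership queries above all dispatch on the mark word's low tag bits. A minimal sketch of that dispatch (illustrative encoding only; the real bit layout lives in markOop.hpp):

#include <cstdint>
#include <cstdio>

// Assumed two-bit tags for the model: 00 stack-locked, 01 neutral, 10 inflated.
enum Tag : uintptr_t { LOCKED = 0, NEUTRAL = 1, MONITOR = 2 };

Tag mark_tag(uintptr_t mark) { return static_cast<Tag>(mark & 0x3); }

const char* owner_kind(uintptr_t mark) {
  switch (mark_tag(mark)) {
    case LOCKED:  return "stack-locked: owner is the frame holding the BasicLock";
    case MONITOR: return "inflated: owner is recorded in the ObjectMonitor";
    default:      return "neutral: unowned";
  }
}

int main() {
  printf("%s\n", owner_kind(0x1000 | MONITOR));
  return 0;
}
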
@@ -1074 +1056 @@
     // to reprovision the caller's free list.
     if (gFreeList != NULL) {
       // Reprovision the thread's omFreeList.
       // Use bulk transfers to reduce the allocation rate and heat
       // on various locks.
-      Thread::muxAcquire(&gListLock, "omAlloc");
+      Thread::muxAcquire(&gListLock, "omAlloc(1)");
       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
         gMonitorFreeCount--;
         ObjectMonitor * take = gFreeList;
         gFreeList = take->FreeNext;
         guarantee(take->object() == NULL, "invariant");
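
The bulk-transfer loop above amortizes one gListLock acquisition over omFreeProvision monitors. A self-contained sketch of the same pattern (simplified model, not HotSpot code):

#include <mutex>

struct Node { Node* next; };

std::mutex g_list_lock;          // stands in for gListLock
Node* g_free_list = nullptr;     // stands in for gFreeList
int g_free_count = 0;

// Detach up to 'provision' nodes from the global list in one critical section.
Node* bulk_take(int provision) {
  std::lock_guard<std::mutex> guard(g_list_lock);
  Node* local = nullptr;
  for (int i = provision; --i >= 0 && g_free_list != nullptr;) {
    g_free_count--;
    Node* take = g_free_list;
    g_free_list = take->next;
    take->next = local;          // push onto the caller's local list
    local = take;
  }
  return local;
}
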
@@ -1148 +1130 @@
     // block in hand.  This avoids some lock traffic and redundant
     // list activity.
 
     // Acquire the gListLock to manipulate gBlockList and gFreeList.
     // An Oyama-Taura-Yonezawa scheme might be more efficient.
-    Thread::muxAcquire(&gListLock, "omAlloc [2]");
+    Thread::muxAcquire(&gListLock, "omAlloc(2)");
     gMonitorPopulation += _BLOCKSIZE-1;
     gMonitorFreeCount += _BLOCKSIZE-1;
 
     // Add the new block to the list of extant blocks (gBlockList).
     // The very first objectMonitor in a block is reserved and dedicated.
@@ -1176 +1158 @@
 // attempt failed.  This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
 //
 // Key constraint: all ObjectMonitors on a thread's free list and the global
 // free list must have their object field set to null. This prevents the
-// scavenger -- deflate_idle_monitors -- from reclaiming them.
+// scavenger -- deflate_monitor_list() -- from reclaiming them.
 
 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                    bool fromPerThreadAlloc) {
+  guarantee(m->header() == NULL, "invariant");
   guarantee(m->object() == NULL, "invariant");
   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
   // Remove from omInUseList
   if (fromPerThreadAlloc) {
     ObjectMonitor* cur_mid_in_use = NULL;
@@ -1221 +1204 @@
 // a global gOmInUseList under the global list lock so these
 // will continue to be scanned.
 //
 // We currently call omFlush() from Threads::remove() _before the thread
 // has been excised from the thread list and is no longer a mutator.
-// This means that omFlush() can not run concurrently with a safepoint and
-// interleave with the scavenge operator. In particular, this ensures that
-// the thread's monitors are scanned by a GC safepoint, either via
-// Thread::oops_do() (if safepoint happens before omFlush()) or via
+// This means that omFlush() cannot run concurrently with a safepoint and
+// interleave with the deflate_idle_monitors scavenge operator. In particular,
+// this ensures that the thread's monitors are scanned by a GC safepoint,
+// either via Thread::oops_do() (if safepoint happens before omFlush()) or via
 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
 // monitors have been transferred to the global in-use list).
 
 void ObjectSynchronizer::omFlush(Thread * Self) {
   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
-  Self->omFreeList = NULL;
   ObjectMonitor * tail = NULL;
   int tally = 0;
   if (list != NULL) {
     ObjectMonitor * s;
     // The thread is going away, the per-thread free monitors
@@ -1246 +1228 @@
       tail = s;
       guarantee(s->object() == NULL, "invariant");
       guarantee(!s->is_busy(), "invariant");
       s->set_owner(NULL);   // redundant but good hygiene
     }
-    guarantee(tail != NULL && list != NULL, "invariant");
+    guarantee(tail != NULL, "invariant");
+    assert(Self->omFreeCount == tally, "free-count off");
+    Self->omFreeList = NULL;
+    Self->omFreeCount = 0;
   }
 
   ObjectMonitor * inUseList = Self->omInUseList;
   ObjectMonitor * inUseTail = NULL;
   int inUseTally = 0;
   if (inUseList != NULL) {
-    Self->omInUseList = NULL;
     ObjectMonitor *cur_om;
     // The thread is going away, however the omInUseList inflated
     // monitors may still be in-use by other threads.
     // Link them to inUseTail, which will be linked into the global in-use list
     // gOmInUseList below, under the gListLock
     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
       inUseTail = cur_om;
       inUseTally++;
     }
+    guarantee(inUseTail != NULL, "invariant");
     assert(Self->omInUseCount == inUseTally, "in-use count off");
+    Self->omInUseList = NULL;
     Self->omInUseCount = 0;
-    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
   }
 
   Thread::muxAcquire(&gListLock, "omFlush");
   if (tail != NULL) {
     tail->FreeNext = gFreeList;
     gFreeList = list;
     gMonitorFreeCount += tally;
-    assert(Self->omFreeCount == tally, "free-count off");
-    Self->omFreeCount = 0;
   }
 
   if (inUseTail != NULL) {
     inUseTail->FreeNext = gOmInUseList;
     gOmInUseList = inUseList;
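
The splice above is the heart of omFlush: a whole thread-local list is prepended to a global list with two pointer writes while the lock is held. A sketch of that splice (simplified model, not HotSpot code):

#include <mutex>

struct Node { Node* next; };

std::mutex g_list_lock;          // stands in for gListLock
Node* g_in_use_list = nullptr;   // stands in for gOmInUseList

void splice_to_global(Node* head, Node* tail) {
  if (tail == nullptr) return;                 // nothing to flush
  std::lock_guard<std::mutex> guard(g_list_lock);
  tail->next = g_in_use_list;                  // local tail -> old global head
  g_in_use_list = head;                        // local head becomes global head
}
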
@@ -1298 +1281 @@
   event->set_cause((u1)cause);
   event->commit();
 }
 
 // Fast path code shared by multiple functions
-ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
+void ObjectSynchronizer::inflate_helper(oop obj) {
   markOop mark = obj->mark();
   if (mark->has_monitor()) {
     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
-    return mark->monitor();
+    return;
   }
-  return ObjectSynchronizer::inflate(Thread::current(),
-                                     obj,
-                                     inflate_cause_vm_internal);
+  inflate(Thread::current(), obj, inflate_cause_vm_internal);
 }
 
 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
-                                                     oop object,
-                                                     const InflateCause cause) {
-
+                                           oop object,
+                                           const InflateCause cause) {
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant");
 
@@ -1335 +1315 @@
     // *  BIASED       - Illegal.  We should never see this
 
     // CASE: inflated
     if (mark->has_monitor()) {
       ObjectMonitor * inf = mark->monitor();
-      assert(inf->header()->is_neutral(), "invariant");
+      markOop dmw = inf->header();
+      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)dmw));
       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       return inf;
     }
 
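
The inflate() loop whose first case appears above re-reads the header on every iteration and retries after CAS interference. A minimal sketch of that retry shape (simplified model, not HotSpot code; a real mark word also has stack-locked and neutral states):

#include <atomic>

struct MonitorModel { int _unused; };

std::atomic<MonitorModel*> header{nullptr};  // nullptr models a neutral mark

MonitorModel* inflate_model() {
  for (;;) {
    MonitorModel* mark = header.load();
    if (mark != nullptr) {
      return mark;                             // CASE: already inflated
    }
    MonitorModel* m = new MonitorModel();
    MonitorModel* expected = nullptr;
    if (header.compare_exchange_strong(expected, m)) {
      return m;                                // we installed the monitor
    }
    delete m;  // interference: the header changed, so release and retry
  }
}
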
@@ -1442 +1423 @@
       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
       OM_PERFDATA_OP(Inflations, inc());
       if (log_is_enabled(Debug, monitorinflation)) {
         if (object->is_instance()) {
-          ResourceMark rm;
-          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+          ResourceMark rm(Self);
+          log_debug(monitorinflation)("inflate(has_locker): "
+                                      "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                       p2i(object), p2i(object->mark()),
                                       object->klass()->external_name());
         }
       }
       if (event.should_commit()) {
@@ -1476 +1458 @@
     m->_recursions   = 0;
     m->_Responsible  = NULL;
     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
 
     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
+      m->set_header(NULL);
       m->set_object(NULL);
-      m->set_owner(NULL);
       m->Recycle();
       omRelease(Self, m, true);
       m = NULL;
       continue;
       // interference - the markword changed - just retry.
@@ -1492 +1474 @@
     // Hopefully the performance counters are allocated on distinct
     // cache lines to avoid false sharing on MP systems ...
     OM_PERFDATA_OP(Inflations, inc());
     if (log_is_enabled(Debug, monitorinflation)) {
       if (object->is_instance()) {
-        ResourceMark rm;
-        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+        ResourceMark rm(Self);
+        log_debug(monitorinflation)("inflate(neutral): "
+                                    "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                     p2i(object), p2i(object->mark()),
                                     object->klass()->external_name());
       }
     }
     if (event.should_commit()) {
@@ -1547 +1530 @@
     // It's idle - scavenge and return to the global free list
     // plain old deflation ...
     if (log_is_enabled(Debug, monitorinflation)) {
       if (obj->is_instance()) {
         ResourceMark rm;
-        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
-                                    "mark " INTPTR_FORMAT " , type %s",
+        log_debug(monitorinflation)("deflate_monitor: "
+                                    "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                     p2i(obj), p2i(obj->mark()),
                                     obj->klass()->external_name());
       }
     }
 
@@ -1575 +1558 @@
   return deflated;
 }
 
 // Walk a given monitor list, and deflate idle monitors
 // The given list could be a per-thread list or a global list
-// Caller acquires gListLock.
+// Caller acquires gListLock as needed.
 //
 // In the case of parallel processing of thread local monitor lists,
 // work is done by Threads::parallel_threads_do() which ensures that
 // each Java thread is processed by exactly one worker thread, and
 // thus avoid conflicts that would arise when worker threads would
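
A sketch of the list walk described above (simplified model, not HotSpot code): traverse a singly-linked in-use list, unlink idle nodes, and collect them on a free list, returning the count the caller uses to adjust nScavenged.

struct Node { Node* next; bool busy; };

int deflate_list(Node** list_head, Node** free_head) {
  int deflated = 0;
  Node* prev = nullptr;
  for (Node* cur = *list_head; cur != nullptr;) {
    Node* next = cur->next;
    if (!cur->busy) {
      // Idle: unlink from the in-use list and push onto the free list.
      if (prev == nullptr) { *list_head = next; } else { prev->next = next; }
      cur->next = *free_head;
      *free_head = cur;
      deflated++;
    } else {
      prev = cur;
    }
    cur = next;
  }
  return deflated;
}
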
@@ -1619 +1602 @@
 }
 
 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
   counters->nInuse = 0;            // currently associated with objects
   counters->nInCirculation = 0;    // extant
-  counters->nScavenged = 0;        // reclaimed
+  counters->nScavenged = 0;        // reclaimed (global and per-thread)
   counters->perThreadTimes = 0.0;  // per-thread scavenge times
 }
 
 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -1633 +1616 @@
   ObjectMonitor * freeTailp = NULL;
 
   // Prevent omFlush from changing mids in Thread dtor's during deflation
   // And in case the vm thread is acquiring a lock during a safepoint
   // See e.g. 6320749
-  Thread::muxAcquire(&gListLock, "scavenge - return");
+  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
 
   // Note: the thread-local monitors lists get deflated in
   // a separate pass. See deflate_thread_local_monitors().
 
   // For moribund threads, scan gOmInUseList
@@ -1698 +1681 @@
 
   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
 
   timer.stop();
 
-  Thread::muxAcquire(&gListLock, "scavenge - return");
+  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
 
   // Adjust counters
   counters->nInCirculation += thread->omInUseCount;
   thread->omInUseCount -= deflated_count;
   counters->nScavenged += deflated_count;