src/hotspot/share/runtime/biasedLocking.cpp
branch      datagramsocketimpl-branch
changeset   58678:9cf78a70fa4f
parent      55005:9b70ebd131b4
child       58679:9c3209ff7550
comparing   58677:13588c901957 with 58678:9cf78a70fa4f
    27 #include "jfr/jfrEvents.hpp"
    27 #include "jfr/jfrEvents.hpp"
    28 #include "jfr/support/jfrThreadId.hpp"
    28 #include "jfr/support/jfrThreadId.hpp"
    29 #include "logging/log.hpp"
    29 #include "logging/log.hpp"
    30 #include "memory/resourceArea.hpp"
    30 #include "memory/resourceArea.hpp"
    31 #include "oops/klass.inline.hpp"
    31 #include "oops/klass.inline.hpp"
    32 #include "oops/markOop.hpp"
    32 #include "oops/markWord.hpp"
    33 #include "oops/oop.inline.hpp"
    33 #include "oops/oop.inline.hpp"
    34 #include "runtime/atomic.hpp"
    34 #include "runtime/atomic.hpp"
    35 #include "runtime/basicLock.hpp"
    35 #include "runtime/basicLock.hpp"
    36 #include "runtime/biasedLocking.hpp"
    36 #include "runtime/biasedLocking.hpp"
    37 #include "runtime/handles.inline.hpp"
    37 #include "runtime/handles.inline.hpp"
    43 
    43 
    44 
    44 
    45 static bool _biased_locking_enabled = false;
    45 static bool _biased_locking_enabled = false;
    46 BiasedLockingCounters BiasedLocking::_counters;
    46 BiasedLockingCounters BiasedLocking::_counters;
    47 
    47 
    48 static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
    48 static GrowableArray<Handle>*   _preserved_oop_stack  = NULL;
    49 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
    49 static GrowableArray<markWord>* _preserved_mark_stack = NULL;
    50 
    50 
    51 static void enable_biased_locking(InstanceKlass* k) {
    51 static void enable_biased_locking(InstanceKlass* k) {
    52   k->set_prototype_header(markOopDesc::biased_locking_prototype());
    52   k->set_prototype_header(markWord::biased_locking_prototype());
       
    53 }
       
    54 
       
    55 static void enable_biased_locking() {
       
    56   _biased_locking_enabled = true;
       
    57   log_info(biasedlocking)("Biased locking enabled");
    53 }
    58 }
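
The prototype header installed above is the class-wide template for newly allocated instances, and biasability is encoded in the low tag bits of the mark word. A minimal standalone sketch of that check, assuming the classic 64-bit layout (2 lock bits plus 1 bias bit); the masks are illustrative stand-ins for HotSpot's markWord:

#include <cassert>
#include <cstdint>

constexpr uint64_t kBiasedLockMaskInPlace = 0b111;
constexpr uint64_t kBiasedLockPattern     = 0b101;  // "biasable/biased"
constexpr uint64_t kUnlockedPattern       = 0b001;  // neutral, unlocked

inline bool has_bias_pattern(uint64_t mark) {
  return (mark & kBiasedLockMaskInPlace) == kBiasedLockPattern;
}

int main() {
  assert(has_bias_pattern(kBiasedLockPattern));   // biasable prototype
  assert(!has_bias_pattern(kUnlockedPattern));    // neutral prototype
  return 0;
}
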
    54 
    59 
    55 class VM_EnableBiasedLocking: public VM_Operation {
    60 class VM_EnableBiasedLocking: public VM_Operation {
    56  private:
       
    57   bool _is_cheap_allocated;
       
    58  public:
    61  public:
    59   VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
    62   VM_EnableBiasedLocking() {}
    60   VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
    63   VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
    61   Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
    64   Mode evaluation_mode() const    { return _async_safepoint; }
    62   bool is_cheap_allocated() const { return _is_cheap_allocated; }
    65   bool is_cheap_allocated() const { return true; }
    63 
    66 
    64   void doit() {
    67   void doit() {
    65     // Iterate the class loader data dictionaries enabling biased locking for all
    68     // Iterate the class loader data dictionaries enabling biased locking for all
    66     // currently loaded classes.
    69     // currently loaded classes.
    67     ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    70     ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    68     // Indicate that future instances should enable it as well
    71     // Indicate that future instances should enable it as well
    69     _biased_locking_enabled = true;
    72     enable_biased_locking();
    70 
       
    71     log_info(biasedlocking)("Biased locking enabled");
       
    72   }
    73   }
    73 
    74 
    74   bool allow_nested_vm_operations() const        { return false; }
    75   bool allow_nested_vm_operations() const        { return false; }
    75 };
    76 };
    76 
    77 
    81   EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
    82   EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
    82 
    83 
    83   virtual void task() {
    84   virtual void task() {
    84     // Use async VM operation to avoid blocking the Watcher thread.
    85     // Use async VM operation to avoid blocking the Watcher thread.
    85     // VM Thread will free C heap storage.
    86     // VM Thread will free C heap storage.
    86     VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    87     VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking();
    87     VMThread::execute(op);
    88     VMThread::execute(op);
    88 
    89 
    89     // Reclaim our storage and disenroll ourselves
    90     // Reclaim our storage and disenroll ourselves
    90     delete this;
    91     delete this;
    91   }
    92   }
    92 };
    93 };
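
The task above is heap allocated by the Watcher thread, hands an async, cheap-allocated VM operation to the VM thread (which frees it), and then deletes itself. A standalone sketch of that fire-and-forget ownership handoff, with std::thread and illustrative names standing in for the Watcher/VM thread machinery:

#include <chrono>
#include <cstdio>
#include <thread>

struct OneShotTask {
  void run() { std::puts("enable biased locking here"); }
};

void schedule_after(int delay_ms) {
  OneShotTask* task = new OneShotTask();
  std::thread([task, delay_ms] {
    std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms));
    task->run();
    delete task;   // the executing thread reclaims the storage
  }).detach();     // the scheduling thread never waits
}

int main() {
  schedule_after(50);
  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  return 0;
}
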
    93 
    94 
    94 
    95 
    95 void BiasedLocking::init() {
    96 void BiasedLocking::init() {
    96   // If biased locking is enabled, schedule a task to fire a few
    97   // If biased locking is enabled and BiasedLockingStartupDelay is set,
    97   // seconds into the run which turns on biased locking for all
    98   // schedule a task to fire after the specified delay which turns on
    98   // currently loaded classes as well as future ones. This is a
    99   // biased locking for all currently loaded classes as well as future
    99   // workaround for startup time regressions due to a large number of
   100   // ones. This could be a workaround for startup time regressions
   100   // safepoints being taken during VM startup for bias revocation.
   101   // due to a large number of safepoints being taken during VM startup for
   101   // Ideally we would have a lower cost for individual bias revocation
   102   // bias revocation.
   102   // and not need a mechanism like this.
       
   103   if (UseBiasedLocking) {
   103   if (UseBiasedLocking) {
   104     if (BiasedLockingStartupDelay > 0) {
   104     if (BiasedLockingStartupDelay > 0) {
   105       EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
   105       EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
   106       task->enroll();
   106       task->enroll();
   107     } else {
   107     } else {
   108       VM_EnableBiasedLocking op(false);
   108       enable_biased_locking();
   109       VMThread::execute(&op);
       
   110     }
   109     }
   111   }
   110   }
   112 }
   111 }
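
With handshake-based revocation, the zero-delay path above no longer needs a synchronous VM_EnableBiasedLocking safepoint; enable_biased_locking() is a plain flag store. A small sketch of the resulting decision, with the scheduler abstracted out and all names illustrative:

#include <cstdint>
#include <functional>

static bool enabled_flag = false;

void init(bool use_biased_locking, int64_t delay_ms,
          const std::function<void(int64_t, std::function<void()>)>& schedule) {
  if (!use_biased_locking) return;
  if (delay_ms > 0) {
    // Deferred: a one-shot task enables biasing later.
    schedule(delay_ms, [] { enabled_flag = true; });
  } else {
    // Immediate: previously a synchronous VM operation, now a plain store.
    enabled_flag = true;
  }
}

int main() {
  init(true, 0, [](int64_t, std::function<void()>) {});
  return enabled_flag ? 0 : 1;
}
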
   113 
   112 
   114 
   113 
   115 bool BiasedLocking::enabled() {
   114 bool BiasedLocking::enabled() {
   116   return _biased_locking_enabled;
   115   assert(UseBiasedLocking, "precondition");
   117 }
   116   // We check "BiasedLockingStartupDelay == 0" here to cover the
       
   117   // possibility of calls to BiasedLocking::enabled() before
       
   118   // BiasedLocking::init().
       
   119   return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
       
   120 }
       
   121 
   118 
   122 
   119 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
   123 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
   120 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
   124 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
   121   GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
   125   GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
   122   if (info != NULL) {
   126   if (info != NULL) {
   148 
   152 
   149   thread->set_cached_monitor_info(info);
   153   thread->set_cached_monitor_info(info);
   150   return info;
   154   return info;
   151 }
   155 }
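
The cache above means one stack walk per thread per revocation pass, however many monitors are examined; clean_up_cached_monitor_info() resets it afterwards. A small sketch of the idiom with stand-in types:

#include <vector>

struct MonitorInfo { void* owner; };

struct ThreadStackCache {
  std::vector<MonitorInfo>* cached = nullptr;

  std::vector<MonitorInfo>* monitors() {
    if (cached != nullptr) {
      return cached;                         // fast path: reuse prior walk
    }
    cached = new std::vector<MonitorInfo>(); // slow path: walk the frames,
    // ... append one MonitorInfo per locked monitor, youngest to oldest ...
    return cached;
  }

  void clear() { delete cached; cached = nullptr; }  // end-of-pass cleanup
};

int main() {
  ThreadStackCache t;
  bool same = (t.monitors() == t.monitors());  // second call hits the cache
  t.clear();
  return same ? 0 : 1;
}
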
   152 
   156 
       
   157 
   153 // After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
   158 // After the call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL,
   154 // AND it is a living thread. Otherwise it will not be updated (i.e. the caller is responsible for initialization).
   159 // AND it is a living thread. Otherwise it will not be updated (i.e. the caller is responsible for initialization).
   155 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   160 void BiasedLocking::single_revoke_at_safepoint(oop obj, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   156   markOop mark = obj->mark();
   161   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
   157   if (!mark->has_bias_pattern()) {
   162   assert(Thread::current()->is_VM_thread(), "must be VMThread");
       
   163 
       
   164   markWord mark = obj->mark();
       
   165   if (!mark.has_bias_pattern()) {
   158     if (log_is_enabled(Info, biasedlocking)) {
   166     if (log_is_enabled(Info, biasedlocking)) {
   159       ResourceMark rm;
   167       ResourceMark rm;
   160       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
   168       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
   161                               ", mark " INTPTR_FORMAT ", type %s"
   169                               ", mark " INTPTR_FORMAT ", type %s"
   162                               ", requesting thread " INTPTR_FORMAT
   170                               ", requesting thread " INTPTR_FORMAT
   163                               " because it's no longer biased)",
   171                               " because it's no longer biased)",
   164                               p2i((void *)obj), (intptr_t) mark,
   172                               p2i((void *)obj), mark.value(),
   165                               obj->klass()->external_name(),
   173                               obj->klass()->external_name(),
   166                               (intptr_t) requesting_thread);
   174                               (intptr_t) requesting_thread);
   167     }
   175     }
   168     return BiasedLocking::NOT_BIASED;
   176     return;
   169   }
   177   }
   170 
   178 
   171   uint age = mark->age();
   179   uint age = mark.age();
   172   markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
   180   markWord unbiased_prototype = markWord::prototype().set_age(age);
   173   markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
       
   174 
   181 
   175   // Log at "info" level if not bulk, else "trace" level
   182   // Log at "info" level if not bulk, else "trace" level
   176   if (!is_bulk) {
   183   if (!is_bulk) {
   177     ResourceMark rm;
   184     ResourceMark rm;
   178     log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
   185     log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
   179                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
   186                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
   180                             ", allow rebias %d, requesting thread " INTPTR_FORMAT,
   187                             ", requesting thread " INTPTR_FORMAT,
   181                             p2i((void *)obj),
   188                             p2i((void *)obj),
   182                             (intptr_t) mark,
   189                             mark.value(),
   183                             obj->klass()->external_name(),
   190                             obj->klass()->external_name(),
   184                             (intptr_t) obj->klass()->prototype_header(),
   191                             obj->klass()->prototype_header().value(),
   185                             (allow_rebias ? 1 : 0),
       
   186                             (intptr_t) requesting_thread);
   192                             (intptr_t) requesting_thread);
   187   } else {
   193   } else {
   188     ResourceMark rm;
   194     ResourceMark rm;
   189     log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
   195     log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
   190                              INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
   196                              INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
   191                              " , allow rebias %d , requesting thread " INTPTR_FORMAT,
   197                              " , requesting thread " INTPTR_FORMAT,
   192                              p2i((void *)obj),
   198                              p2i((void *)obj),
   193                              (intptr_t) mark,
   199                              mark.value(),
   194                              obj->klass()->external_name(),
   200                              obj->klass()->external_name(),
   195                              (intptr_t) obj->klass()->prototype_header(),
   201                              obj->klass()->prototype_header().value(),
   196                              (allow_rebias ? 1 : 0),
       
   197                              (intptr_t) requesting_thread);
   202                              (intptr_t) requesting_thread);
   198   }
   203   }
   199 
   204 
   200   JavaThread* biased_thread = mark->biased_locker();
   205   JavaThread* biased_thread = mark.biased_locker();
   201   if (biased_thread == NULL) {
   206   if (biased_thread == NULL) {
   202     // Object is anonymously biased. We can get here if, for
   207     // Object is anonymously biased. We can get here if, for
   203     // example, we revoke the bias due to an identity hash code
   208     // example, we revoke the bias due to an identity hash code
   204     // being computed for an object.
   209     // being computed for an object.
   205     if (!allow_rebias) {
   210     obj->set_mark(unbiased_prototype);
   206       obj->set_mark(unbiased_prototype);
   211 
   207     }
       
   208     // Log at "info" level if not bulk, else "trace" level
   212     // Log at "info" level if not bulk, else "trace" level
   209     if (!is_bulk) {
   213     if (!is_bulk) {
   210       log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
   214       log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
   211     } else {
   215     } else {
   212       log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
   216       log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
   213     }
   217     }
   214     return BiasedLocking::BIAS_REVOKED;
   218     return;
   215   }
   219   }
   216 
   220 
   217   // Handle case where the thread toward which the object was biased has exited
   221   // Handle case where the thread toward which the object was biased has exited
   218   bool thread_is_alive = false;
   222   bool thread_is_alive = false;
   219   if (requesting_thread == biased_thread) {
   223   if (requesting_thread == biased_thread) {
   221   } else {
   225   } else {
   222     ThreadsListHandle tlh;
   226     ThreadsListHandle tlh;
   223     thread_is_alive = tlh.includes(biased_thread);
   227     thread_is_alive = tlh.includes(biased_thread);
   224   }
   228   }
   225   if (!thread_is_alive) {
   229   if (!thread_is_alive) {
   226     if (allow_rebias) {
   230     obj->set_mark(unbiased_prototype);
   227       obj->set_mark(biased_prototype);
       
   228     } else {
       
   229       obj->set_mark(unbiased_prototype);
       
   230     }
       
   231     // Log at "info" level if not bulk, else "trace" level
   231     // Log at "info" level if not bulk, else "trace" level
   232     if (!is_bulk) {
   232     if (!is_bulk) {
   233       log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
   233       log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
   234                               PTR_FORMAT ")", p2i(biased_thread));
   234                               PTR_FORMAT ")", p2i(biased_thread));
   235     } else {
   235     } else {
   236       log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
   236       log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
   237                                PTR_FORMAT ")", p2i(biased_thread));
   237                                PTR_FORMAT ")", p2i(biased_thread));
   238     }
   238     }
   239     return BiasedLocking::BIAS_REVOKED;
   239     return;
   240   }
   240   }
   241 
   241 
   242   // Log at "info" level if not bulk, else "trace" level
   242   // Log at "info" level if not bulk, else "trace" level
   243   if (!is_bulk) {
   243   if (!is_bulk) {
   244     log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
   244     log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
   255   // or unbiased state.
   255   // or unbiased state.
   256   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
   256   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
   257   BasicLock* highest_lock = NULL;
   257   BasicLock* highest_lock = NULL;
   258   for (int i = 0; i < cached_monitor_info->length(); i++) {
   258   for (int i = 0; i < cached_monitor_info->length(); i++) {
   259     MonitorInfo* mon_info = cached_monitor_info->at(i);
   259     MonitorInfo* mon_info = cached_monitor_info->at(i);
   260     if (oopDesc::equals(mon_info->owner(), obj)) {
   260     if (mon_info->owner() == obj) {
   261       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
   261       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
   262                                p2i((void *) mon_info->owner()),
   262                                p2i((void *) mon_info->owner()),
   263                                p2i((void *) obj));
   263                                p2i((void *) obj));
   264       // Assume recursive case and fix up highest lock later
   264       // Assume recursive case and fix up highest lock below
   265       markOop mark = markOopDesc::encode((BasicLock*) NULL);
   265       markWord mark = markWord::encode((BasicLock*) NULL);
   266       highest_lock = mon_info->lock();
   266       highest_lock = mon_info->lock();
   267       highest_lock->set_displaced_header(mark);
   267       highest_lock->set_displaced_header(mark);
   268     } else {
   268     } else {
   269       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
   269       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
   270                                p2i((void *) mon_info->owner()),
   270                                p2i((void *) mon_info->owner()),
   274   if (highest_lock != NULL) {
   274   if (highest_lock != NULL) {
   275     // Fix up highest lock to contain displaced header and point
   275     // Fix up highest lock to contain displaced header and point
   276     // object at it
   276     // object at it
   277     highest_lock->set_displaced_header(unbiased_prototype);
   277     highest_lock->set_displaced_header(unbiased_prototype);
   278     // Reset object header to point to displaced mark.
   278     // Reset object header to point to displaced mark.
   279     // Must release storing the lock address for platforms without TSO
   279     // Must release store the lock address for platforms without TSO
   280     // ordering (e.g. ppc).
   280     // ordering (e.g. ppc).
   281     obj->release_set_mark(markOopDesc::encode(highest_lock));
   281     obj->release_set_mark(markWord::encode(highest_lock));
   282     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
   282     assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
   283     // Log at "info" level if not bulk, else "trace" level
   283     // Log at "info" level if not bulk, else "trace" level
   284     if (!is_bulk) {
   284     if (!is_bulk) {
   285       log_info(biasedlocking)("  Revoked bias of currently-locked object");
   285       log_info(biasedlocking)("  Revoked bias of currently-locked object");
   286     } else {
   286     } else {
   287       log_trace(biasedlocking)("  Revoked bias of currently-locked object");
   287       log_trace(biasedlocking)("  Revoked bias of currently-locked object");
   291     if (!is_bulk) {
   291     if (!is_bulk) {
   292       log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
   292       log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
   293     } else {
   293     } else {
   294       log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
   294       log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
   295     }
   295     }
   296     if (allow_rebias) {
   296     // Store the unlocked value into the object's header.
   297       obj->set_mark(biased_prototype);
   297     obj->set_mark(unbiased_prototype);
   298     } else {
       
   299       // Store the unlocked value into the object's header.
       
   300       obj->set_mark(unbiased_prototype);
       
   301     }
       
   302   }
   298   }
   303 
   299 
   304   // If requested, return information on which thread held the bias
   300   // If requested, return information on which thread held the bias
   305   if (biased_locker != NULL) {
   301   if (biased_locker != NULL) {
   306     *biased_locker = biased_thread;
   302     *biased_locker = biased_thread;
   307   }
   303   }
   308 
       
   309   return BiasedLocking::BIAS_REVOKED;
       
   310 }
   304 }
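
When the object is still locked, the revocation above rewrites the oldest monitor record and only then points the mark word at it with release_set_mark. A standalone sketch of why that last store needs release semantics, with std::atomic standing in for the mark word:

#include <atomic>
#include <cstdint>

struct BasicLock { uint64_t displaced_header; };

std::atomic<uint64_t> mark_word{0};

void publish_stack_lock(BasicLock* highest_lock, uint64_t unbiased_header) {
  highest_lock->displaced_header = unbiased_header;  // plain store
  // Release store: without it, a non-TSO machine (e.g. PPC) could let
  // another thread see the mark pointing at the lock before the
  // displaced header written above is visible.
  mark_word.store(reinterpret_cast<uint64_t>(highest_lock),
                  std::memory_order_release);
}

int main() {
  static BasicLock lock;
  publish_stack_lock(&lock, /*unbiased_header=*/0b001);
  return mark_word.load(std::memory_order_acquire) != 0 ? 0 : 1;
}
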
   311 
   305 
   312 
   306 
   313 enum HeuristicsResult {
   307 enum HeuristicsResult {
   314   HR_NOT_BIASED    = 1,
   308   HR_NOT_BIASED    = 1,
   316   HR_BULK_REBIAS   = 3,
   310   HR_BULK_REBIAS   = 3,
   317   HR_BULK_REVOKE   = 4
   311   HR_BULK_REVOKE   = 4
   318 };
   312 };
   319 
   313 
   320 
   314 
   321 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
   315 static HeuristicsResult update_heuristics(oop o) {
   322   markOop mark = o->mark();
   316   markWord mark = o->mark();
   323   if (!mark->has_bias_pattern()) {
   317   if (!mark.has_bias_pattern()) {
   324     return HR_NOT_BIASED;
   318     return HR_NOT_BIASED;
   325   }
   319   }
   326 
   320 
   327   // Heuristics to attempt to throttle the number of revocations.
   321   // Heuristics to attempt to throttle the number of revocations.
   328   // Stages:
   322   // Stages:
   369 
   363 
   370   return HR_SINGLE_REVOKE;
   364   return HR_SINGLE_REVOKE;
   371 }
   365 }
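
update_heuristics() (its body is elided in this hunk) throttles revocations per class: repeated single revocations escalate to a bulk rebias, further ones to a bulk revoke, and a long quiet period decays the count. A standalone sketch of that shape, using the defaults of the BiasedLockingBulkRebiasThreshold, BiasedLockingBulkRevokeThreshold and BiasedLockingDecayTime flags; the exact bookkeeping is illustrative:

#include <cstdint>

enum HeuristicsResult { HR_NOT_BIASED = 1, HR_SINGLE_REVOKE, HR_BULK_REBIAS, HR_BULK_REVOKE };

constexpr int     kBulkRebiasThreshold = 20;     // BiasedLockingBulkRebiasThreshold
constexpr int     kBulkRevokeThreshold = 40;     // BiasedLockingBulkRevokeThreshold
constexpr int64_t kDecayTimeMs         = 25000;  // BiasedLockingDecayTime

struct KlassCounters {
  int     revocations = 0;
  int64_t last_bulk_ms = 0;
};

HeuristicsResult update_heuristics(KlassCounters& k, int64_t now_ms) {
  // A quiet period since the last bulk rebias forgives old revocations,
  // so a slow trickle never escalates to disabling biasing for the class.
  if (k.revocations >= kBulkRebiasThreshold &&
      k.last_bulk_ms != 0 &&
      now_ms - k.last_bulk_ms >= kDecayTimeMs) {
    k.revocations = 0;
  }
  if (k.revocations <= kBulkRevokeThreshold) {
    k.revocations++;
  }
  if (k.revocations == kBulkRevokeThreshold) return HR_BULK_REVOKE;
  if (k.revocations == kBulkRebiasThreshold) return HR_BULK_REBIAS;
  return HR_SINGLE_REVOKE;
}

int main() {
  KlassCounters k;
  HeuristicsResult r = HR_SINGLE_REVOKE;
  for (int i = 0; i < 20; i++) r = update_heuristics(k, /*now_ms=*/1000);
  return r == HR_BULK_REBIAS ? 0 : 1;  // the 20th revocation escalates
}
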
   372 
   366 
   373 
   367 
   374 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
   368 void BiasedLocking::bulk_revoke_at_safepoint(oop o, bool bulk_rebias, JavaThread* requesting_thread) {
   375                                                                    bool bulk_rebias,
       
   376                                                                    bool attempt_rebias_of_object,
       
   377                                                                    JavaThread* requesting_thread) {
       
   378   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
   369   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
       
   370   assert(Thread::current()->is_VM_thread(), "must be VMThread");
   379 
   371 
   380   log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
   372   log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
   381                           INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   373                           INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   382                           (bulk_rebias ? "rebias" : "revoke"),
   374                           (bulk_rebias ? "rebias" : "revoke"),
   383                           p2i((void *) o),
   375                           p2i((void *) o),
   384                           (intptr_t) o->mark(),
   376                           o->mark().value(),
   385                           o->klass()->external_name());
   377                           o->klass()->external_name());
   386 
   378 
   387   jlong cur_time = os::javaTimeMillis();
   379   jlong cur_time = os::javaTimeMillis();
   388   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
   380   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
   389 
       
   390 
   381 
   391   Klass* k_o = o->klass();
   382   Klass* k_o = o->klass();
   392   Klass* klass = k_o;
   383   Klass* klass = k_o;
   393 
   384 
   394   {
   385   {
   403 
   394 
   404       // If the prototype header doesn't have the bias pattern, don't
   395       // If the prototype header doesn't have the bias pattern, don't
   405       // try to update the epoch -- assume another VM operation came in
   396       // try to update the epoch -- assume another VM operation came in
   406       // and reset the header to the unbiased state, which will
   397       // and reset the header to the unbiased state, which will
   407       // implicitly cause all existing biases to be revoked
   398       // implicitly cause all existing biases to be revoked
   408       if (klass->prototype_header()->has_bias_pattern()) {
   399       if (klass->prototype_header().has_bias_pattern()) {
   409         int prev_epoch = klass->prototype_header()->bias_epoch();
   400         int prev_epoch = klass->prototype_header().bias_epoch();
   410         klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
   401         klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
   411         int cur_epoch = klass->prototype_header()->bias_epoch();
   402         int cur_epoch = klass->prototype_header().bias_epoch();
   412 
   403 
   413         // Now walk all threads' stacks and adjust epochs of any biased
   404         // Now walk all threads' stacks and adjust epochs of any biased
   414         // and locked objects of this data type we encounter
   405         // and locked objects of this data type we encounter
   415         for (; JavaThread *thr = jtiwh.next(); ) {
   406         for (; JavaThread *thr = jtiwh.next(); ) {
   416           GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
   407           GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
   417           for (int i = 0; i < cached_monitor_info->length(); i++) {
   408           for (int i = 0; i < cached_monitor_info->length(); i++) {
   418             MonitorInfo* mon_info = cached_monitor_info->at(i);
   409             MonitorInfo* mon_info = cached_monitor_info->at(i);
   419             oop owner = mon_info->owner();
   410             oop owner = mon_info->owner();
   420             markOop mark = owner->mark();
   411             markWord mark = owner->mark();
   421             if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
   412             if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
   422               // We might have encountered this object already in the case of recursive locking
   413               // We might have encountered this object already in the case of recursive locking
   423               assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
   414               assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
   424               owner->set_mark(mark->set_bias_epoch(cur_epoch));
   415               owner->set_mark(mark.set_bias_epoch(cur_epoch));
   425             }
   416             }
   426           }
   417           }
   427         }
   418         }
   428       }
   419       }
   429 
   420 
   430       // At this point we're done. All we have to do is potentially
   421       // At this point we're done. All we have to do is potentially
   431       // adjust the header of the given object to revoke its bias.
   422       // adjust the header of the given object to revoke its bias.
   432       revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
   423       single_revoke_at_safepoint(o, true, requesting_thread, NULL);
   433     } else {
   424     } else {
   434       if (log_is_enabled(Info, biasedlocking)) {
   425       if (log_is_enabled(Info, biasedlocking)) {
   435         ResourceMark rm;
   426         ResourceMark rm;
   436         log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
   427         log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
   437       }
   428       }
   438 
   429 
   439       // Disable biased locking for this data type. Not only will this
   430       // Disable biased locking for this data type. Not only will this
   440       // cause future instances to not be biased, but existing biased
   431       // cause future instances to not be biased, but existing biased
   441       // instances will notice that this implicitly caused their biases
   432       // instances will notice that this implicitly caused their biases
   442       // to be revoked.
   433       // to be revoked.
   443       klass->set_prototype_header(markOopDesc::prototype());
   434       klass->set_prototype_header(markWord::prototype());
   444 
   435 
   445       // Now walk all threads' stacks and forcibly revoke the biases of
   436       // Now walk all threads' stacks and forcibly revoke the biases of
   446       // any locked and biased objects of this data type we encounter.
   437       // any locked and biased objects of this data type we encounter.
   447       for (; JavaThread *thr = jtiwh.next(); ) {
   438       for (; JavaThread *thr = jtiwh.next(); ) {
   448         GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
   439         GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
   449         for (int i = 0; i < cached_monitor_info->length(); i++) {
   440         for (int i = 0; i < cached_monitor_info->length(); i++) {
   450           MonitorInfo* mon_info = cached_monitor_info->at(i);
   441           MonitorInfo* mon_info = cached_monitor_info->at(i);
   451           oop owner = mon_info->owner();
   442           oop owner = mon_info->owner();
   452           markOop mark = owner->mark();
   443           markWord mark = owner->mark();
   453           if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
   444           if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
   454             revoke_bias(owner, false, true, requesting_thread, NULL);
   445             single_revoke_at_safepoint(owner, true, requesting_thread, NULL);
   455           }
   446           }
   456         }
   447         }
   457       }
   448       }
   458 
   449 
   459       // The bias of the passed object must also be forcibly revoked
   450       // The bias of the passed object must also be forcibly revoked
   460       // to ensure the guarantees made to callers
   451       // to ensure the guarantees made to callers
   461       revoke_bias(o, false, true, requesting_thread, NULL);
   452       single_revoke_at_safepoint(o, true, requesting_thread, NULL);
   462     }
   453     }
   463   } // ThreadsListHandle is destroyed here.
   454   } // ThreadsListHandle is destroyed here.
   464 
   455 
   465   log_info(biasedlocking)("* Ending bulk revocation");
   456   log_info(biasedlocking)("* Ending bulk revocation");
   466 
   457 
   467   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
   458   assert(!o->mark().has_bias_pattern(), "bug in bulk bias revocation");
   468 
   459 }
   469   if (attempt_rebias_of_object &&
   460 
   470       o->mark()->has_bias_pattern() &&
   461 
   471       klass->prototype_header()->has_bias_pattern()) {
   462 static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
   472     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
   463   if (thread != NULL) {
   473                                            klass->prototype_header()->bias_epoch());
   464     thread->set_cached_monitor_info(NULL);
   474     o->set_mark(new_mark);
   465   } else {
   475     status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
   466     // Walk the thread list clearing out the cached monitors
   476     log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
   467     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
   477   }
   468       thr->set_cached_monitor_info(NULL);
   478 
   469     }
   479   assert(!o->mark()->has_bias_pattern() ||
   470   }
   480          (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
   471 }
   481          "bug in bulk bias revocation");
   472 
   482 
   473 
   483   return status_code;
   474 class VM_BulkRevokeBias : public VM_Operation {
   484 }
   475 private:
   485 
   476   Handle* _obj;
   486 
   477   JavaThread* _requesting_thread;
   487 static void clean_up_cached_monitor_info() {
   478   bool _bulk_rebias;
   488   // Walk the thread list clearing out the cached monitors
   479   uint64_t _safepoint_id;
   489   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
   480 
   490     thr->set_cached_monitor_info(NULL);
   481 public:
   491   }
   482   VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
   492 }
   483                     bool bulk_rebias)
   493 
   484     : _obj(obj)
   494 
   485     , _requesting_thread(requesting_thread)
   495 class VM_RevokeBias : public VM_Operation {
   486     , _bulk_rebias(bulk_rebias)
       
   487     , _safepoint_id(0) {}
       
   488 
       
   489   virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
       
   490 
       
   491   virtual void doit() {
       
   492     BiasedLocking::bulk_revoke_at_safepoint((*_obj)(), _bulk_rebias, _requesting_thread);
       
   493     _safepoint_id = SafepointSynchronize::safepoint_id();
       
   494     clean_up_cached_monitor_info();
       
   495   }
       
   496 
       
   497   bool is_bulk_rebias() const {
       
   498     return _bulk_rebias;
       
   499   }
       
   500 
       
   501   uint64_t safepoint_id() const {
       
   502     return _safepoint_id;
       
   503   }
       
   504 };
       
   505 
       
   506 
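
The bulk-rebias arm of bulk_revoke_at_safepoint() above invalidates every existing bias of a class with one epoch increment instead of touching each instance: a bias is only valid while the object's recorded epoch matches the class's current one. A standalone sketch of that trick, with a 2-bit epoch mirroring the mark word's field and illustrative types:

#include <cassert>
#include <cstdint>

struct Klass  { uint8_t epoch = 0; };
struct Object { Klass* klass; uint8_t bias_epoch = 0; bool biased = true; };

bool bias_valid(const Object& o) {
  return o.biased && o.bias_epoch == o.klass->epoch;
}

int main() {
  Klass k;
  Object a{&k}, b{&k};
  assert(bias_valid(a) && bias_valid(b));
  k.epoch = (k.epoch + 1) & 0b11;           // bulk rebias: one store
  assert(!bias_valid(a) && !bias_valid(b)); // every existing bias is stale
  return 0;
}
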
       
   507 class RevokeOneBias : public ThreadClosure {
   496 protected:
   508 protected:
   497   Handle* _obj;
   509   Handle _obj;
   498   GrowableArray<Handle>* _objs;
       
   499   JavaThread* _requesting_thread;
   510   JavaThread* _requesting_thread;
       
   511   JavaThread* _biased_locker;
   500   BiasedLocking::Condition _status_code;
   512   BiasedLocking::Condition _status_code;
   501   traceid _biased_locker_id;
   513   traceid _biased_locker_id;
   502   uint64_t _safepoint_id;
       
   503 
   514 
   504 public:
   515 public:
   505   VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
   516   RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
   506     : _obj(obj)
   517     : _obj(obj)
   507     , _objs(NULL)
       
   508     , _requesting_thread(requesting_thread)
   518     , _requesting_thread(requesting_thread)
       
   519     , _biased_locker(biased_locker)
   509     , _status_code(BiasedLocking::NOT_BIASED)
   520     , _status_code(BiasedLocking::NOT_BIASED)
   510     , _biased_locker_id(0)
   521     , _biased_locker_id(0) {}
   511     , _safepoint_id(0) {}
   522 
   512 
   523   void do_thread(Thread* target) {
   513   VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
   524     assert(target == _biased_locker, "Wrong thread");
   514     : _obj(NULL)
   525 
   515     , _objs(objs)
   526     oop o = _obj();
   516     , _requesting_thread(requesting_thread)
   527     markWord mark = o->mark();
   517     , _status_code(BiasedLocking::NOT_BIASED)
   528 
   518     , _biased_locker_id(0)
   529     if (!mark.has_bias_pattern()) {
   519     , _safepoint_id(0) {}
   530       return;
   520 
   531     }
   521   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
   532 
   522 
   533     markWord prototype = o->klass()->prototype_header();
   523   virtual bool doit_prologue() {
   534     if (!prototype.has_bias_pattern()) {
   524     // Verify that there is actual work to do since the callers just
   535       // This object has a stale bias from before the handshake
   525     // give us locked object(s). If we don't find any biased objects
   536       // was requested. If we fail this race, the object's bias
   526     // there is nothing to do and we avoid a safepoint.
   537       // has been revoked by another thread so we simply return.
   527     if (_obj != NULL) {
   538       markWord biased_value = mark;
   528       markOop mark = (*_obj)()->mark();
   539       mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
   529       if (mark->has_bias_pattern()) {
   540       assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
   530         return true;
   541       if (biased_value == mark) {
   531       }
   542         _status_code = BiasedLocking::BIAS_REVOKED;
   532     } else {
   543       }
   533       for ( int i = 0 ; i < _objs->length(); i++ ) {
   544       return;
   534         markOop mark = (_objs->at(i))()->mark();
   545     }
   535         if (mark->has_bias_pattern()) {
   546 
   536           return true;
   547     if (_biased_locker == mark.biased_locker()) {
       
   548       if (mark.bias_epoch() == prototype.bias_epoch()) {
       
   549         // Epoch is still valid. This means biaser could be currently
       
   550         // synchronized on this object. We must walk its stack looking
       
   551         // for monitor records associated with this object and change
       
   552         // them to be stack locks if any are found.
       
   553         ResourceMark rm;
       
   554         BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
       
   555         _biased_locker->set_cached_monitor_info(NULL);
       
   556         assert(!o->mark().has_bias_pattern(), "invariant");
       
   557         _biased_locker_id = JFR_THREAD_ID(_biased_locker);
       
   558         _status_code = BiasedLocking::BIAS_REVOKED;
       
   559         return;
       
   560       } else {
       
   561         markWord biased_value = mark;
       
   562         mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
       
   563         if (mark == biased_value || !mark.has_bias_pattern()) {
       
   564           assert(!o->mark().has_bias_pattern(), "should be revoked");
       
   565           _status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
       
   566           return;
   537         }
   567         }
   538       }
   568       }
   539     }
   569     }
   540     return false;
   570 
   541   }
   571     _status_code = BiasedLocking::NOT_REVOKED;
   542 
       
   543   virtual void doit() {
       
   544     if (_obj != NULL) {
       
   545       log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
       
   546       JavaThread* biased_locker = NULL;
       
   547       _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
       
   548       if (biased_locker != NULL) {
       
   549         _biased_locker_id = JFR_THREAD_ID(biased_locker);
       
   550       }
       
   551       _safepoint_id = SafepointSynchronize::safepoint_counter();
       
   552       clean_up_cached_monitor_info();
       
   553       return;
       
   554     } else {
       
   555       log_info(biasedlocking)("Revoking bias with global safepoint:");
       
   556       BiasedLocking::revoke_at_safepoint(_objs);
       
   557     }
       
   558   }
   572   }
   559 
   573 
   560   BiasedLocking::Condition status_code() const {
   574   BiasedLocking::Condition status_code() const {
   561     return _status_code;
   575     return _status_code;
   562   }
   576   }
   563 
   577 
   564   traceid biased_locker() const {
   578   traceid biased_locker() const {
   565     return _biased_locker_id;
   579     return _biased_locker_id;
   566   }
   580   }
   567 
       
   568   uint64_t safepoint_id() const {
       
   569     return _safepoint_id;
       
   570   }
       
   571 };
   581 };
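
RevokeOneBias packages the revocation as a ThreadClosure so Handshake::execute can run it in the context of the biased thread, without a global safepoint. A much simplified stand-in for that shape; the fake handshake_execute below only models "closure ran" versus "target dead":

#include <cstdio>

struct Thread { const char* name; };

struct ThreadClosure {
  virtual void do_thread(Thread* target) = 0;
  virtual ~ThreadClosure() = default;
};

bool handshake_execute(ThreadClosure* cl, Thread* target, bool target_alive) {
  if (!target_alive) return false;  // caller must handle a dead biaser
  cl->do_thread(target);            // runs at the target's next poll, or
  return true;                      // on its behalf while it is blocked
}

struct RevokeOneBiasSketch : ThreadClosure {
  void do_thread(Thread* target) override {
    std::printf("revoking bias in the context of %s\n", target->name);
  }
};

int main() {
  Thread biaser{"biaser"};
  RevokeOneBiasSketch revoke;
  return handshake_execute(&revoke, &biaser, /*target_alive=*/true) ? 0 : 1;
}
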
   572 
   582 
   573 
       
   574 class VM_BulkRevokeBias : public VM_RevokeBias {
       
   575 private:
       
   576   bool _bulk_rebias;
       
   577   bool _attempt_rebias_of_object;
       
   578 
       
   579 public:
       
   580   VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
       
   581                     bool bulk_rebias,
       
   582                     bool attempt_rebias_of_object)
       
   583     : VM_RevokeBias(obj, requesting_thread)
       
   584     , _bulk_rebias(bulk_rebias)
       
   585     , _attempt_rebias_of_object(attempt_rebias_of_object) {}
       
   586 
       
   587   virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
       
   588   virtual bool doit_prologue()   { return true; }
       
   589 
       
   590   virtual void doit() {
       
   591     _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
       
   592     _safepoint_id = SafepointSynchronize::safepoint_counter();
       
   593     clean_up_cached_monitor_info();
       
   594   }
       
   595 
       
   596   bool is_bulk_rebias() const {
       
   597     return _bulk_rebias;
       
   598   }
       
   599 };
       
   600 
   583 
   601 static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
   584 static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
   602   assert(event != NULL, "invariant");
   585   assert(event != NULL, "invariant");
   603   assert(k != NULL, "invariant");
   586   assert(k != NULL, "invariant");
   604   assert(event->should_commit(), "invariant");
   587   assert(event->should_commit(), "invariant");
   605   event->set_lockClass(k);
   588   event->set_lockClass(k);
   606   event->commit();
   589   event->commit();
   607 }
   590 }
   608 
   591 
   609 static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* op) {
   592 static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) {
   610   assert(event != NULL, "invariant");
   593   assert(event != NULL, "invariant");
   611   assert(k != NULL, "invariant");
   594   assert(k != NULL, "invariant");
   612   assert(op != NULL, "invariant");
   595   assert(op != NULL, "invariant");
   613   assert(event->should_commit(), "invariant");
   596   assert(event->should_commit(), "invariant");
   614   event->set_lockClass(k);
   597   event->set_lockClass(k);
   615   event->set_safepointId(op->safepoint_id());
   598   event->set_safepointId(0);
   616   event->set_previousOwner(op->biased_locker());
   599   event->set_previousOwner(op->biased_locker());
   617   event->commit();
   600   event->commit();
   618 }
   601 }
   619 
   602 
   620 static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
   603 static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
   626   event->set_disableBiasing(!op->is_bulk_rebias());
   609   event->set_disableBiasing(!op->is_bulk_rebias());
   627   event->set_safepointId(op->safepoint_id());
   610   event->set_safepointId(op->safepoint_id());
   628   event->commit();
   611   event->commit();
   629 }
   612 }
   630 
   613 
   631 BiasedLocking::Condition BiasedLocking::revoke_own_locks_in_handshake(Handle obj, TRAPS) {
   614 
   632   markOop mark = obj->mark();
   615 BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) {
   633 
   616 
   634   if (!mark->has_bias_pattern()) {
   617   EventBiasedLockRevocation event;
   635     return NOT_BIASED;
   618   if (PrintBiasedLockingStatistics) {
       
   619     Atomic::inc(handshakes_count_addr());
       
   620   }
       
   621   log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread "
       
   622                                      INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester),
       
   623                                      p2i(biaser), p2i(obj()));
       
   624 
       
   625   RevokeOneBias revoke(obj, requester, biaser);
       
   626   bool executed = Handshake::execute(&revoke, biaser);
       
   627   if (revoke.status_code() == NOT_REVOKED) {
       
   628     return NOT_REVOKED;
       
   629   }
       
   630   if (executed) {
       
   631     log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked",
       
   632                                        p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already "));
       
   633     if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
       
   634       post_revocation_event(&event, obj->klass(), &revoke);
       
   635     }
       
   636     assert(!obj->mark().has_bias_pattern(), "invariant");
       
   637     return revoke.status_code();
       
   638   } else {
       
   639     // Thread was not alive.
       
   640   // Grab Threads_lock before manually trying to revoke bias. This avoids a race with a newly
       
   641     // created JavaThread (that happens to get the same memory address as biaser) synchronizing
       
   642     // on this object.
       
   643     {
       
   644       MutexLocker ml(Threads_lock);
       
   645       markWord mark = obj->mark();
       
   646       // Check if somebody else was able to revoke it before biased thread exited.
       
   647       if (!mark.has_bias_pattern()) {
       
   648         return NOT_BIASED;
       
   649       }
       
   650       ThreadsListHandle tlh;
       
   651       markWord prototype = obj->klass()->prototype_header();
       
   652       if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
       
   653                                             prototype.bias_epoch() == mark.bias_epoch())) {
       
   654         obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
       
   655         if (event.should_commit()) {
       
   656           post_revocation_event(&event, obj->klass(), &revoke);
       
   657         }
       
   658         assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
       
   659         return BIAS_REVOKED;
       
   660       }
       
   661     }
       
   662   }
       
   663 
       
   664   return NOT_REVOKED;
       
   665 }
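
When the handshake cannot be executed because the biaser has exited, the fallback above swings the mark word straight to the neutral, age-preserving prototype with a single CAS. A standalone sketch reusing the illustrative bit layout from the earlier sketches (3 tag bits, then 4 age bits):

#include <atomic>
#include <cstdint>

constexpr uint64_t kUnlockedPattern = 0b001;
constexpr uint64_t kAgeMask         = uint64_t{0b1111} << 3;

uint64_t unbiased_prototype(uint64_t mark) {
  return kUnlockedPattern | (mark & kAgeMask);  // prototype().set_age(age)
}

bool revoke_stale_bias(std::atomic<uint64_t>& mark_word) {
  uint64_t mark = mark_word.load();
  // One CAS: succeed and the bias is gone; fail and someone else
  // already revoked or rebiased, which the caller re-examines.
  return mark_word.compare_exchange_strong(mark, unbiased_prototype(mark));
}

int main() {
  std::atomic<uint64_t> mark{0b101 | (uint64_t{2} << 3)};  // biased, age 2
  bool revoked  = revoke_stale_bias(mark);
  bool age_kept = (mark.load() & kAgeMask) == (uint64_t{2} << 3);
  return (revoked && age_kept) ? 0 : 1;
}
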
       
   666 
       
   667 
       
   668 // Caller should have instantiated a ResourceMark object before calling this method
       
   669 void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) {
       
   670   assert(!SafepointSynchronize::is_at_safepoint() || !ThreadLocalHandshakes,
       
   671          "if ThreadLocalHandshakes is enabled this should always be executed outside safepoints");
       
   672   assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");
       
   673 
       
   674   markWord mark = obj->mark();
       
   675   assert(mark.biased_locker() == biased_locker &&
       
   676          obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");
       
   677 
       
   678   log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
       
   679                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
       
   680                            ", biaser " INTPTR_FORMAT " %s",
       
   681                            Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
       
   682                            p2i(Thread::current()),
       
   683                            p2i(obj),
       
   684                            mark.value(),
       
   685                            obj->klass()->external_name(),
       
   686                            obj->klass()->prototype_header().value(),
       
   687                            p2i(biased_locker),
       
   688                            Thread::current()->is_VM_thread() ? "" : "(walking own stack)");
       
   689 
       
   690   markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());
       
   691 
       
   692   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
       
   693   BasicLock* highest_lock = NULL;
       
   694   for (int i = 0; i < cached_monitor_info->length(); i++) {
       
   695     MonitorInfo* mon_info = cached_monitor_info->at(i);
       
   696     if (mon_info->owner() == obj) {
       
   697       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
       
   698                                p2i(mon_info->owner()),
       
   699                                p2i(obj));
       
   700       // Assume recursive case and fix up highest lock below
       
   701       markWord mark = markWord::encode((BasicLock*) NULL);
       
   702       highest_lock = mon_info->lock();
       
   703       highest_lock->set_displaced_header(mark);
       
   704     } else {
       
   705       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
       
   706                                p2i(mon_info->owner()),
       
   707                                p2i(obj));
       
   708     }
       
   709   }
       
   710   if (highest_lock != NULL) {
       
   711     // Fix up highest lock to contain displaced header and point
       
   712     // object at it
       
   713     highest_lock->set_displaced_header(unbiased_prototype);
       
   714     // Reset object header to point to displaced mark.
       
   715     // Must release store the lock address for platforms without TSO
       
   716     // ordering (e.g. ppc).
       
   717     obj->release_set_mark(markWord::encode(highest_lock));
       
   718     assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
       
   719     log_info(biasedlocking)("  Revoked bias of currently-locked object");
       
   720   } else {
       
   721     log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
       
   722     // Store the unlocked value into the object's header.
       
   723     obj->set_mark(unbiased_prototype);
       
   724   }
       
   725 
       
   726   assert(!obj->mark().has_bias_pattern(), "must not be biased");
       
   727 }
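
For recursive locking, the walk above gives every monitor record for the object a NULL displaced header (HotSpot's marker for a recursive stack lock) and then rewrites only the oldest record with the real unbiased header. A sketch of that fixup with stand-in types, records ordered youngest to oldest as get_or_compute_monitor_info returns them:

#include <cassert>
#include <cstdint>
#include <vector>

struct BasicLock { uint64_t displaced_header; };

uint64_t bias_to_stack_locks(std::vector<BasicLock*>& records,
                             uint64_t unbiased_header) {
  BasicLock* highest = nullptr;
  for (BasicLock* lock : records) {
    lock->displaced_header = 0;  // NULL displaced header = recursive entry
    highest = lock;              // the last record seen is the oldest
  }
  assert(highest != nullptr);
  highest->displaced_header = unbiased_header;  // real header lives here
  return reinterpret_cast<uint64_t>(highest);   // new mark word value
}

int main() {
  BasicLock inner{}, outer{};
  std::vector<BasicLock*> records{&inner, &outer};  // youngest first
  uint64_t mark = bias_to_stack_locks(records, /*unbiased_header=*/0b001);
  return (inner.displaced_header == 0 &&
          outer.displaced_header == 0b001 &&
          mark == reinterpret_cast<uint64_t>(&outer)) ? 0 : 1;
}
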
       
   728 
       
   729 void BiasedLocking::revoke_own_lock(Handle obj, TRAPS) {
       
   730   assert(THREAD->is_Java_thread(), "must be called by a JavaThread");
       
   731   JavaThread* thread = (JavaThread*)THREAD;
       
   732 
       
   733   markWord mark = obj->mark();
       
   734 
       
   735   if (!mark.has_bias_pattern()) {
       
   736     return;
   636   }
   737   }
   637 
   738 
   638   Klass *k = obj->klass();
   739   Klass *k = obj->klass();
   639   markOop prototype_header = k->prototype_header();
   740   assert(mark.biased_locker() == thread &&
   640   assert(mark->biased_locker() == THREAD &&
   741          k->prototype_header().bias_epoch() == mark.bias_epoch(), "Revoke failed, unhandled biased lock state");
   641          prototype_header->bias_epoch() == mark->bias_epoch(), "Revoke failed, unhandled biased lock state");
       
   642   ResourceMark rm;
   742   ResourceMark rm;
   643   log_info(biasedlocking)("Revoking bias by walking my own stack:");
   743   log_info(biasedlocking)("Revoking bias by walking my own stack:");
   644   EventBiasedLockSelfRevocation event;
   744   EventBiasedLockSelfRevocation event;
   645   BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
   745   BiasedLocking::walk_stack_and_revoke(obj(), (JavaThread*) thread);
   646   ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
   746   thread->set_cached_monitor_info(NULL);
   647   assert(cond == BIAS_REVOKED, "why not?");
   747   assert(!obj->mark().has_bias_pattern(), "invariant");
   648   if (event.should_commit()) {
   748   if (event.should_commit()) {
   649     post_self_revocation_event(&event, k);
   749     post_self_revocation_event(&event, k);
   650   }
   750   }
   651   return cond;
   751 }
   652 }
   752 
   653 
   753 void BiasedLocking::revoke(Handle obj, TRAPS) {
   654 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
       
   655   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
   754   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
   656 
   755 
   657   // We can revoke the biases of anonymously-biased objects
   756   while (true) {
   658   // efficiently enough that we should not cause these revocations to
   757     // We can revoke the biases of anonymously-biased objects
   659   // update the heuristics because doing so may cause unwanted bulk
   758     // efficiently enough that we should not cause these revocations to
   660   // revocations (which are expensive) to occur.
   759     // update the heuristics because doing so may cause unwanted bulk
   661   markOop mark = obj->mark();
   760     // revocations (which are expensive) to occur.
   662   if (mark->is_biased_anonymously() && !attempt_rebias) {
   761     markWord mark = obj->mark();
   663     // We are probably trying to revoke the bias of this object due to
   762 
   664     // an identity hash code computation. Try to revoke the bias
   763     if (!mark.has_bias_pattern()) {
   665     // without a safepoint. This is possible if we can successfully
   764       return;
   666     // compare-and-exchange an unbiased header into the mark word of
   765     }
   667     // the object, meaning that no other thread has raced to acquire
   766 
   668     // the bias of the object.
   767     if (mark.is_biased_anonymously()) {
   669     markOop biased_value       = mark;
   768       // We are probably trying to revoke the bias of this object due to
   670     markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
   769       // an identity hash code computation. Try to revoke the bias
   671     markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
   770       // without a safepoint. This is possible if we can successfully
   672     if (res_mark == biased_value) {
   771       // compare-and-exchange an unbiased header into the mark word of
   673       return BIAS_REVOKED;
   772       // the object, meaning that no other thread has raced to acquire
   674     }
   773       // the bias of the object.
   675   } else if (mark->has_bias_pattern()) {
   774       markWord biased_value       = mark;
   676     Klass* k = obj->klass();
   775       markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
   677     markOop prototype_header = k->prototype_header();
   776       markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
   678     if (!prototype_header->has_bias_pattern()) {
   777       if (res_mark == biased_value) {
   679       // This object has a stale bias from before the bulk revocation
   778         return;
   680       // for this data type occurred. It's pointless to update the
   779       }
   681       // heuristics at this point so simply update the header with a
   780       mark = res_mark;  // Refresh mark with the latest value.
   682       // CAS. If we fail this race, the object's bias has been revoked
   781     } else {
   683       // by another thread so we simply return and let the caller deal
   782       Klass* k = obj->klass();
   684       // with it.
   783       markWord prototype_header = k->prototype_header();
   685       markOop biased_value       = mark;
   784       if (!prototype_header.has_bias_pattern()) {
   686       markOop res_mark = obj->cas_set_mark(prototype_header, mark);
   785         // This object has a stale bias from before the bulk revocation
   687       assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
   786         // for this data type occurred. It's pointless to update the
   688       return BIAS_REVOKED;
   787         // heuristics at this point so simply update the header with a
   689     } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
   788         // CAS. If we fail this race, the object's bias has been revoked
   690       // The epoch of this biasing has expired indicating that the
   789         // by another thread so we simply return and let the caller deal
   691       // object is effectively unbiased. Depending on whether we need
   790         // with it.
   692       // to rebias or revoke the bias of this object we can do it
   791         obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
   693       // efficiently enough with a CAS that we shouldn't update the
   792         assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
   694       // heuristics. This is normally done in the assembly code but we
   793         return;
   695       // can reach this point due to various points in the runtime
   794       } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
   696       // needing to revoke biases.
   795         // The epoch of this biasing has expired indicating that the
   697       if (attempt_rebias) {
   796         // object is effectively unbiased. We can revoke the bias of this
   698         assert(THREAD->is_Java_thread(), "");
   797         // object efficiently enough with a CAS that we shouldn't update the
   699         markOop biased_value       = mark;
   798         // heuristics. This is normally done in the assembly code but we
   700         markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
   799         // can reach this point due to various points in the runtime
   701         markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
   800         // needing to revoke biases.
       
   801         markWord res_mark;
       
   802         markWord biased_value       = mark;
       
   803         markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
       
   804         res_mark = obj->cas_set_mark(unbiased_prototype, mark);
   702         if (res_mark == biased_value) {
   805         if (res_mark == biased_value) {
   703           return BIAS_REVOKED_AND_REBIASED;
   806           return;
   704         }
   807         }
       
   808         mark = res_mark;  // Refresh mark with the latest value.
       
   809       }
       
   810     }
       
   811 
       
   812     HeuristicsResult heuristics = update_heuristics(obj());
       
   813     if (heuristics == HR_NOT_BIASED) {
       
   814       return;
       
   815     } else if (heuristics == HR_SINGLE_REVOKE) {
       
   816       JavaThread *blt = mark.biased_locker();
       
   817       assert(blt != NULL, "invariant");
       
   818       if (blt == THREAD) {
       
   819         // A thread is trying to revoke the bias of an object biased
       
   820         // toward it, again likely due to an identity hash code
       
   821         // computation. We can again avoid a safepoint/handshake in this case
       
   822         // since we are only going to walk our own stack. There are no
       
   823         // races with revocations occurring in other threads because we
       
   824         // reach no safepoints in the revocation path.
       
   825         EventBiasedLockSelfRevocation event;
       
   826         ResourceMark rm;
       
   827         walk_stack_and_revoke(obj(), blt);
       
   828         blt->set_cached_monitor_info(NULL);
       
   829         assert(!obj->mark().has_bias_pattern(), "invariant");
       
   830         if (event.should_commit()) {
       
   831           post_self_revocation_event(&event, obj->klass());
       
   832         }
       
   833         return;
   705       } else {
   834       } else {
   706         markOop biased_value       = mark;
   835         BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
   707         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
   836         if (cond != NOT_REVOKED) {
   708         markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
   837           return;
   709         if (res_mark == biased_value) {
       
   710           return BIAS_REVOKED;
       
   711         }
   838         }
   712       }
   839       }
   713     }
   840     } else {
   714   }
   841       assert((heuristics == HR_BULK_REVOKE) ||
   715 
    842              (heuristics == HR_BULK_REBIAS), "?");
   716   HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
   843       EventBiasedLockClassRevocation event;
   717   if (heuristics == HR_NOT_BIASED) {
   844       VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
   718     return NOT_BIASED;
   845                                     (heuristics == HR_BULK_REBIAS));
   719   } else if (heuristics == HR_SINGLE_REVOKE) {
   846       VMThread::execute(&bulk_revoke);
   720     Klass *k = obj->klass();
       
   721     markOop prototype_header = k->prototype_header();
       
   722     if (mark->biased_locker() == THREAD &&
       
   723         prototype_header->bias_epoch() == mark->bias_epoch()) {
       
   724       // A thread is trying to revoke the bias of an object biased
       
   725       // toward it, again likely due to an identity hash code
       
   726       // computation. We can again avoid a safepoint in this case
       
   727       // since we are only going to walk our own stack. There are no
       
   728       // races with revocations occurring in other threads because we
       
   729       // reach no safepoints in the revocation path.
       
   730       // Also check the epoch because even if threads match, another thread
       
   731       // can come in with a CAS to steal the bias of an object that has a
       
   732       // stale epoch.
       
   733       ResourceMark rm;
       
   734       log_info(biasedlocking)("Revoking bias by walking my own stack:");
       
   735       EventBiasedLockSelfRevocation event;
       
   736       BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
       
   737       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
       
   738       assert(cond == BIAS_REVOKED, "why not?");
       
   739       if (event.should_commit()) {
   847       if (event.should_commit()) {
   740         post_self_revocation_event(&event, k);
   848         post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
   741       }
   849       }
   742       return cond;
   850       return;
   743     } else {
   851     }
   744       EventBiasedLockRevocation event;
   852   }
   745       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
   853 }
   746       VMThread::execute(&revoke);
   854 
   747       if (event.should_commit() && revoke.status_code() != NOT_BIASED) {
    855 // All objects in objs are expected to be locked by the biaser thread
   748         post_revocation_event(&event, k, &revoke);
   856 void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
   749       }
   857   bool clean_my_cache = false;
   750       return revoke.status_code();
   858   for (int i = 0; i < objs->length(); i++) {
   751     }
   859     oop obj = (objs->at(i))();
   752   }
   860     markWord mark = obj->mark();
   753 
   861     if (mark.has_bias_pattern()) {
   754   assert((heuristics == HR_BULK_REVOKE) ||
   862       walk_stack_and_revoke(obj, biaser);
   755          (heuristics == HR_BULK_REBIAS), "?");
   863       clean_my_cache = true;
   756   EventBiasedLockClassRevocation event;
   864     }
   757   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
   865   }
   758                                 (heuristics == HR_BULK_REBIAS),
   866   if (clean_my_cache) {
   759                                 attempt_rebias);
   867     clean_up_cached_monitor_info(biaser);
   760   VMThread::execute(&bulk_revoke);
   868   }
   761   if (event.should_commit()) {
       
   762     post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
       
   763   }
       
   764   return bulk_revoke.status_code();
       
   765 }
       
   766 
       
   767 
       
   768 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
       
   769   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
       
   770   if (objs->length() == 0) {
       
   771     return;
       
   772   }
       
   773   VM_RevokeBias revoke(objs, JavaThread::current());
       
   774   VMThread::execute(&revoke);
       
   775 }
   869 }
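The anonymously-biased branch of revoke() above avoids a safepoint by publishing an unbiased header with a single compare-and-exchange, exactly as its comment describes. A hedged stand-alone sketch of that step, with std::atomic standing in for the oop's cas_set_mark() and the same mask/pattern constants as in the earlier sketch:

    #include <atomic>
    #include <cstdint>

    constexpr uintptr_t kBiasedLockMaskInPlace = 0x7;  // low three bits
    constexpr uintptr_t kBiasedLockPattern     = 0x5;  // 0b101

    // Returns true if this call installed the unbiased header. On CAS
    // failure another thread raced to acquire or revoke the bias; the
    // caller re-reads the mark and retries its outer loop.
    bool try_revoke_anonymous_bias(std::atomic<uintptr_t>& mark_word,
                                   uintptr_t unbiased_prototype) {
      uintptr_t expected = mark_word.load(std::memory_order_relaxed);
      if ((expected & kBiasedLockMaskInPlace) != kBiasedLockPattern) {
        return false;  // no bias left to revoke
      }
      return mark_word.compare_exchange_strong(expected, unbiased_prototype);
    }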
   776 
   870 
   777 
   871 
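When update_heuristics() answers HR_SINGLE_REVOKE and the bias holder is a different thread, revoke() dispatches to single_revoke_with_handshake(), pausing only the biaser rather than bringing all threads to a safepoint. The following is an illustrative model of that control flow only; TargetThread and execute_handshake are stand-ins for the VM's Handshake machinery, not its API:

    #include <cstdint>
    #include <functional>

    struct TargetThread {
      // In the VM a handshake stops just this thread at its next poll and
      // runs the closure; here it is modeled as a direct call.
      void execute_handshake(const std::function<void(TargetThread&)>& op) {
        op(*this);
      }
    };

    bool single_revoke_with_handshake_sketch(TargetThread& biaser,
                                             uintptr_t& mark_word,
                                             uintptr_t unbiased_prototype) {
      bool revoked = false;
      biaser.execute_handshake([&](TargetThread&) {
        // The biaser is paused here, so its stack can be walked and the
        // bias revoked without racing against its lock operations.
        mark_word = unbiased_prototype;
        revoked = true;
      });
      return revoked;
    }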
   778 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
   872 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
   779   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
   873   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
   780   oop obj = h_obj();
   874   oop obj = h_obj();
   781   HeuristicsResult heuristics = update_heuristics(obj, false);
   875   HeuristicsResult heuristics = update_heuristics(obj);
   782   if (heuristics == HR_SINGLE_REVOKE) {
   876   if (heuristics == HR_SINGLE_REVOKE) {
   783     revoke_bias(obj, false, false, NULL, NULL);
   877     JavaThread* biased_locker = NULL;
       
   878     single_revoke_at_safepoint(obj, false, NULL, &biased_locker);
       
   879     if (biased_locker) {
       
   880       clean_up_cached_monitor_info(biased_locker);
       
   881     }
   784   } else if ((heuristics == HR_BULK_REBIAS) ||
   882   } else if ((heuristics == HR_BULK_REBIAS) ||
   785              (heuristics == HR_BULK_REVOKE)) {
   883              (heuristics == HR_BULK_REVOKE)) {
   786     bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
   884     bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
   787   }
   885     clean_up_cached_monitor_info();
   788   clean_up_cached_monitor_info();
   886   }
   789 }
       
   790 
       
   791 
       
   792 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
       
   793   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
       
   794   int len = objs->length();
       
   795   for (int i = 0; i < len; i++) {
       
   796     oop obj = (objs->at(i))();
       
   797     HeuristicsResult heuristics = update_heuristics(obj, false);
       
   798     if (heuristics == HR_SINGLE_REVOKE) {
       
   799       revoke_bias(obj, false, false, NULL, NULL);
       
   800     } else if ((heuristics == HR_BULK_REBIAS) ||
       
   801                (heuristics == HR_BULK_REVOKE)) {
       
   802       bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
       
   803     }
       
   804   }
       
   805   clean_up_cached_monitor_info();
       
   806 }
   887 }
   807 
   888 
   808 
   889 
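Both revoke() and revoke_at_safepoint() branch on update_heuristics(), which counts revocations per class and escalates to bulk rebias or bulk revoke once a class has misbehaved often enough. A hypothetical sketch of that counting; the threshold parameters echo the BiasedLockingBulkRebiasThreshold and BiasedLockingBulkRevokeThreshold product flags, but the bookkeeping shown is illustrative rather than the VM's:

    enum HeuristicsResult { HR_NOT_BIASED, HR_SINGLE_REVOKE,
                            HR_BULK_REBIAS, HR_BULK_REVOKE };

    struct ClassStats { int revocation_count = 0; };

    HeuristicsResult update_heuristics_sketch(ClassStats& k, bool biased,
                                              int bulk_rebias_threshold,
                                              int bulk_revoke_threshold) {
      if (!biased) return HR_NOT_BIASED;            // nothing to revoke
      k.revocation_count++;
      if (k.revocation_count >= bulk_revoke_threshold) return HR_BULK_REVOKE;
      if (k.revocation_count == bulk_rebias_threshold) return HR_BULK_REBIAS;
      return HR_SINGLE_REVOKE;                      // common case
    }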
   809 void BiasedLocking::preserve_marks() {
   890 void BiasedLocking::preserve_marks() {
   810   if (!UseBiasedLocking)
   891   if (!UseBiasedLocking)
   822   // must not clobber a bias is when a biased object is currently
   903   // must not clobber a bias is when a biased object is currently
   823   // locked. To handle this case we iterate over the currently-locked
   904   // locked. To handle this case we iterate over the currently-locked
   824   // monitors in a prepass and, if they are biased, preserve their
   905   // monitors in a prepass and, if they are biased, preserve their
   825   // mark words here. This should be a relatively small set of objects
   906   // mark words here. This should be a relatively small set of objects
   826   // especially compared to the number of objects in the heap.
   907   // especially compared to the number of objects in the heap.
   827   _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
   908   _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
   828   _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
   909   _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
   829 
   910 
   830   ResourceMark rm;
   911   ResourceMark rm;
   831   Thread* cur = Thread::current();
   912   Thread* cur = Thread::current();
   832   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
   913   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
   840           for (int i = len - 1; i >= 0; i--) {
   921           for (int i = len - 1; i >= 0; i--) {
   841             MonitorInfo* mon_info = monitors->at(i);
   922             MonitorInfo* mon_info = monitors->at(i);
   842             if (mon_info->owner_is_scalar_replaced()) continue;
   923             if (mon_info->owner_is_scalar_replaced()) continue;
   843             oop owner = mon_info->owner();
   924             oop owner = mon_info->owner();
   844             if (owner != NULL) {
   925             if (owner != NULL) {
   845               markOop mark = owner->mark();
   926               markWord mark = owner->mark();
   846               if (mark->has_bias_pattern()) {
   927               if (mark.has_bias_pattern()) {
   847                 _preserved_oop_stack->push(Handle(cur, owner));
   928                 _preserved_oop_stack->push(Handle(cur, owner));
   848                 _preserved_mark_stack->push(mark);
   929                 _preserved_mark_stack->push(mark);
   849               }
   930               }
   850             }
   931             }
   851           }
   932           }
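preserve_marks() and restore_marks() form the pairing the comment above describes: snapshot the biased headers of currently-locked objects before a mark-clobbering operation, then write them back afterwards. A compact stand-alone sketch of the pattern, with simplified stand-in types in place of Handle and GrowableArray:

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Obj { uintptr_t mark; };

    static std::vector<std::pair<Obj*, uintptr_t>>* g_preserved = nullptr;

    void preserve_marks_sketch(const std::vector<Obj*>& locked_biased_objs) {
      g_preserved = new std::vector<std::pair<Obj*, uintptr_t>>();
      for (Obj* o : locked_biased_objs) {
        g_preserved->push_back({o, o->mark});  // save before it is clobbered
      }
    }

    void restore_marks_sketch() {
      for (auto& entry : *g_preserved) {
        entry.first->mark = entry.second;      // put the biased header back
      }
      delete g_preserved;
      g_preserved = nullptr;
    }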
   864   assert(_preserved_mark_stack != NULL, "double free");
   945   assert(_preserved_mark_stack != NULL, "double free");
   865 
   946 
   866   int len = _preserved_oop_stack->length();
   947   int len = _preserved_oop_stack->length();
   867   for (int i = 0; i < len; i++) {
   948   for (int i = 0; i < len; i++) {
   868     Handle owner = _preserved_oop_stack->at(i);
   949     Handle owner = _preserved_oop_stack->at(i);
   869     markOop mark = _preserved_mark_stack->at(i);
   950     markWord mark = _preserved_mark_stack->at(i);
   870     owner->set_mark(mark);
   951     owner->set_mark(mark);
   871   }
   952   }
   872 
   953 
   873   delete _preserved_oop_stack;
   954   delete _preserved_oop_stack;
   874   _preserved_oop_stack = NULL;
   955   _preserved_oop_stack = NULL;
   880 int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
   961 int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
   881 int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
   962 int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
   882 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
   963 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
   883 int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
   964 int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
   884 int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
   965 int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
       
   966 int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
   885 int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
   967 int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
   886 int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
   968 int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
   887 
   969 
   888 
   970 
   889 // BiasedLockingCounters
   971 // BiasedLockingCounters
   903   tty->print_cr("# total entries: %d", _total_entry_count);
   985   tty->print_cr("# total entries: %d", _total_entry_count);
   904   tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
   986   tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
   905   tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
   987   tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
   906   tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
   988   tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
   907   tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
   989   tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
       
    990   tty->print_cr("# handshake entries: %d", _handshakes_count);
   908   tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
   991   tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
   909   tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
   992   tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
   910 }
   993 }
   911 
   994 
   912 void BiasedLockingCounters::print() const { print_on(tty); }
   995 void BiasedLockingCounters::print() const { print_on(tty); }
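The counters printed by print_on() are gathered on the locking fast and slow paths and dumped when biased-locking statistics printing is enabled (the PrintBiasedLockingStatistics flag, to the best of my knowledge). For the shape of the report outside the VM, a stand-alone analogue with simplified names:

    #include <cstdio>

    struct BiasedLockingCountersSketch {
      int total = 0, biased = 0, rebiased = 0, revoked = 0, handshakes = 0;
      void print_on(std::FILE* out) const {
        std::fprintf(out, "# total entries: %d\n", total);
        std::fprintf(out, "# biased lock entries: %d\n", biased);
        std::fprintf(out, "# rebiased lock entries: %d\n", rebiased);
        std::fprintf(out, "# revoked lock entries: %d\n", revoked);
        std::fprintf(out, "# handshake entries: %d\n", handshakes);
      }
    };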