diff -r 13588c901957 -r 9cf78a70fa4f src/hotspot/share/runtime/biasedLocking.cpp
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Thu Oct 17 20:53:35 2019 +0100
@@ -29,7 +29,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/markOop.hpp"
+#include "oops/markWord.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/basicLock.hpp"
@@ -45,30 +45,31 @@
 static bool _biased_locking_enabled = false;
 BiasedLockingCounters BiasedLocking::_counters;
 
-static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
-static GrowableArray<markOop>* _preserved_mark_stack = NULL;
+static GrowableArray<Handle>*   _preserved_oop_stack  = NULL;
+static GrowableArray<markWord>* _preserved_mark_stack = NULL;
 
 static void enable_biased_locking(InstanceKlass* k) {
-  k->set_prototype_header(markOopDesc::biased_locking_prototype());
+  k->set_prototype_header(markWord::biased_locking_prototype());
+}
+
+static void enable_biased_locking() {
+  _biased_locking_enabled = true;
+  log_info(biasedlocking)("Biased locking enabled");
 }
 
 class VM_EnableBiasedLocking: public VM_Operation {
- private:
-  bool _is_cheap_allocated;
  public:
-  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
+  VM_EnableBiasedLocking() {}
   VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
-  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
-  bool is_cheap_allocated() const { return _is_cheap_allocated; }
+  Mode evaluation_mode() const    { return _async_safepoint; }
+  bool is_cheap_allocated() const { return true; }
 
   void doit() {
     // Iterate the class loader data dictionaries enabling biased locking for all
     // currently loaded classes.
     ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
     // Indicate that future instances should enable it as well
-    _biased_locking_enabled = true;
-
-    log_info(biasedlocking)("Biased locking enabled");
+    enable_biased_locking();
   }
 
   bool allow_nested_vm_operations() const        { return false; }
@@ -83,7 +84,7 @@
   virtual void task() {
     // Use async VM operation to avoid blocking the Watcher thread.
     // VM Thread will free C heap storage.
-    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
+    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking();
     VMThread::execute(op);
 
     // Reclaim our storage and disenroll ourself
@@ -93,29 +94,32 @@
 
 void BiasedLocking::init() {
-  // If biased locking is enabled, schedule a task to fire a few
-  // seconds into the run which turns on biased locking for all
-  // currently loaded classes as well as future ones. This is a
-  // workaround for startup time regressions due to a large number of
-  // safepoints being taken during VM startup for bias revocation.
-  // Ideally we would have a lower cost for individual bias revocation
-  // and not need a mechanism like this.
+  // If biased locking is enabled and BiasedLockingStartupDelay is set,
+  // schedule a task to fire after the specified delay which turns on
+  // biased locking for all currently loaded classes as well as future
+  // ones. This could be a workaround for startup time regressions
+  // due to large number of safepoints being taken during VM startup for
+  // bias revocation.
   if (UseBiasedLocking) {
     if (BiasedLockingStartupDelay > 0) {
       EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
       task->enroll();
     } else {
-      VM_EnableBiasedLocking op(false);
-      VMThread::execute(&op);
+      enable_biased_locking();
     }
   }
 }
 
 
 bool BiasedLocking::enabled() {
-  return _biased_locking_enabled;
+  assert(UseBiasedLocking, "precondition");
+  // We check "BiasedLockingStartupDelay == 0" here to cover the
+  // possibility of calls to BiasedLocking::enabled() before
+  // BiasedLocking::init().
+  return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
 }
+
 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
   GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
@@ -150,68 +154,68 @@
   return info;
 }
 
+
 // After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
 // AND it is a living thread. Otherwise it will not be updated, (i.e. the caller is responsible for initialization).
-static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
-  markOop mark = obj->mark();
-  if (!mark->has_bias_pattern()) {
+void BiasedLocking::single_revoke_at_safepoint(oop obj, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert(Thread::current()->is_VM_thread(), "must be VMThread");
+
+  markWord mark = obj->mark();
+  if (!mark.has_bias_pattern()) {
     if (log_is_enabled(Info, biasedlocking)) {
       ResourceMark rm;
       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                               ", mark " INTPTR_FORMAT ", type %s"
                               ", requesting thread " INTPTR_FORMAT
                               " because it's no longer biased)",
-                              p2i((void *)obj), (intptr_t) mark,
+                              p2i((void *)obj), mark.value(),
                               obj->klass()->external_name(),
                               (intptr_t) requesting_thread);
     }
-    return BiasedLocking::NOT_BIASED;
+    return;
   }
 
-  uint age = mark->age();
-  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
-  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
+  uint age = mark.age();
+  markWord unbiased_prototype = markWord::prototype().set_age(age);
 
   // Log at "info" level if not bulk, else "trace" level
   if (!is_bulk) {
     ResourceMark rm;
     log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
-                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
+                            ", requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
-                            (intptr_t) mark,
+                            mark.value(),
                             obj->klass()->external_name(),
-                            (intptr_t) obj->klass()->prototype_header(),
-                            (allow_rebias ? 1 : 0),
+                            obj->klass()->prototype_header().value(),
                             (intptr_t) requesting_thread);
   } else {
     ResourceMark rm;
     log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                              INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
-                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
+                             " , requesting thread " INTPTR_FORMAT,
                              p2i((void *)obj),
-                             (intptr_t) mark,
+                             mark.value(),
                              obj->klass()->external_name(),
-                             (intptr_t) obj->klass()->prototype_header(),
-                             (allow_rebias ? 1 : 0),
+                             obj->klass()->prototype_header().value(),
                              (intptr_t) requesting_thread);
   }
 
-  JavaThread* biased_thread = mark->biased_locker();
+  JavaThread* biased_thread = mark.biased_locker();
   if (biased_thread == NULL) {
     // Object is anonymously biased.
We can get here if, for // example, we revoke the bias due to an identity hash code // being computed for an object. - if (!allow_rebias) { - obj->set_mark(unbiased_prototype); - } + obj->set_mark(unbiased_prototype); + // Log at "info" level if not bulk, else "trace" level if (!is_bulk) { log_info(biasedlocking)(" Revoked bias of anonymously-biased object"); } else { log_trace(biasedlocking)(" Revoked bias of anonymously-biased object"); } - return BiasedLocking::BIAS_REVOKED; + return; } // Handle case where the thread toward which the object was biased has exited @@ -223,11 +227,7 @@ thread_is_alive = tlh.includes(biased_thread); } if (!thread_is_alive) { - if (allow_rebias) { - obj->set_mark(biased_prototype); - } else { - obj->set_mark(unbiased_prototype); - } + obj->set_mark(unbiased_prototype); // Log at "info" level if not bulk, else "trace" level if (!is_bulk) { log_info(biasedlocking)(" Revoked bias of object biased toward dead thread (" @@ -236,7 +236,7 @@ log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread (" PTR_FORMAT ")", p2i(biased_thread)); } - return BiasedLocking::BIAS_REVOKED; + return; } // Log at "info" level if not bulk, else "trace" level @@ -257,12 +257,12 @@ BasicLock* highest_lock = NULL; for (int i = 0; i < cached_monitor_info->length(); i++) { MonitorInfo* mon_info = cached_monitor_info->at(i); - if (oopDesc::equals(mon_info->owner(), obj)) { + if (mon_info->owner() == obj) { log_trace(biasedlocking)(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")", p2i((void *) mon_info->owner()), p2i((void *) obj)); - // Assume recursive case and fix up highest lock later - markOop mark = markOopDesc::encode((BasicLock*) NULL); + // Assume recursive case and fix up highest lock below + markWord mark = markWord::encode((BasicLock*) NULL); highest_lock = mon_info->lock(); highest_lock->set_displaced_header(mark); } else { @@ -276,10 +276,10 @@ // object at it highest_lock->set_displaced_header(unbiased_prototype); // Reset object header to point to displaced mark. - // Must release storing the lock address for platforms without TSO + // Must release store the lock address for platforms without TSO // ordering (e.g. ppc). - obj->release_set_mark(markOopDesc::encode(highest_lock)); - assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit"); + obj->release_set_mark(markWord::encode(highest_lock)); + assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit"); // Log at "info" level if not bulk, else "trace" level if (!is_bulk) { log_info(biasedlocking)(" Revoked bias of currently-locked object"); @@ -293,20 +293,14 @@ } else { log_trace(biasedlocking)(" Revoked bias of currently-unlocked object"); } - if (allow_rebias) { - obj->set_mark(biased_prototype); - } else { - // Store the unlocked value into the object's header. - obj->set_mark(unbiased_prototype); - } + // Store the unlocked value into the object's header. 
+ obj->set_mark(unbiased_prototype); } // If requested, return information on which thread held the bias if (biased_locker != NULL) { *biased_locker = biased_thread; } - - return BiasedLocking::BIAS_REVOKED; } @@ -318,9 +312,9 @@ }; -static HeuristicsResult update_heuristics(oop o, bool allow_rebias) { - markOop mark = o->mark(); - if (!mark->has_bias_pattern()) { +static HeuristicsResult update_heuristics(oop o) { + markWord mark = o->mark(); + if (!mark.has_bias_pattern()) { return HR_NOT_BIASED; } @@ -371,23 +365,20 @@ } -static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o, - bool bulk_rebias, - bool attempt_rebias_of_object, - JavaThread* requesting_thread) { +void BiasedLocking::bulk_revoke_at_safepoint(oop o, bool bulk_rebias, JavaThread* requesting_thread) { assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint"); + assert(Thread::current()->is_VM_thread(), "must be VMThread"); log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", (bulk_rebias ? "rebias" : "revoke"), p2i((void *) o), - (intptr_t) o->mark(), + o->mark().value(), o->klass()->external_name()); jlong cur_time = os::javaTimeMillis(); o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time); - Klass* k_o = o->klass(); Klass* klass = k_o; @@ -405,10 +396,10 @@ // try to update the epoch -- assume another VM operation came in // and reset the header to the unbiased state, which will // implicitly cause all existing biases to be revoked - if (klass->prototype_header()->has_bias_pattern()) { - int prev_epoch = klass->prototype_header()->bias_epoch(); - klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch()); - int cur_epoch = klass->prototype_header()->bias_epoch(); + if (klass->prototype_header().has_bias_pattern()) { + int prev_epoch = klass->prototype_header().bias_epoch(); + klass->set_prototype_header(klass->prototype_header().incr_bias_epoch()); + int cur_epoch = klass->prototype_header().bias_epoch(); // Now walk all threads' stacks and adjust epochs of any biased // and locked objects of this data type we encounter @@ -417,11 +408,11 @@ for (int i = 0; i < cached_monitor_info->length(); i++) { MonitorInfo* mon_info = cached_monitor_info->at(i); oop owner = mon_info->owner(); - markOop mark = owner->mark(); - if ((owner->klass() == k_o) && mark->has_bias_pattern()) { + markWord mark = owner->mark(); + if ((owner->klass() == k_o) && mark.has_bias_pattern()) { // We might have encountered this object already in the case of recursive locking - assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment"); - owner->set_mark(mark->set_bias_epoch(cur_epoch)); + assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment"); + owner->set_mark(mark.set_bias_epoch(cur_epoch)); } } } @@ -429,7 +420,7 @@ // At this point we're done. All we have to do is potentially // adjust the header of the given object to revoke its bias. - revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL); + single_revoke_at_safepoint(o, true, requesting_thread, NULL); } else { if (log_is_enabled(Info, biasedlocking)) { ResourceMark rm; @@ -440,7 +431,7 @@ // cause future instances to not be biased, but existing biased // instances will notice that this implicitly caused their biases // to be revoked. 
- klass->set_prototype_header(markOopDesc::prototype()); + klass->set_prototype_header(markWord::prototype()); // Now walk all threads' stacks and forcibly revoke the biases of // any locked and biased objects of this data type we encounter. @@ -449,112 +440,135 @@ for (int i = 0; i < cached_monitor_info->length(); i++) { MonitorInfo* mon_info = cached_monitor_info->at(i); oop owner = mon_info->owner(); - markOop mark = owner->mark(); - if ((owner->klass() == k_o) && mark->has_bias_pattern()) { - revoke_bias(owner, false, true, requesting_thread, NULL); + markWord mark = owner->mark(); + if ((owner->klass() == k_o) && mark.has_bias_pattern()) { + single_revoke_at_safepoint(owner, true, requesting_thread, NULL); } } } // Must force the bias of the passed object to be forcibly revoked // as well to ensure guarantees to callers - revoke_bias(o, false, true, requesting_thread, NULL); + single_revoke_at_safepoint(o, true, requesting_thread, NULL); } } // ThreadsListHandle is destroyed here. log_info(biasedlocking)("* Ending bulk revocation"); - BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED; - - if (attempt_rebias_of_object && - o->mark()->has_bias_pattern() && - klass->prototype_header()->has_bias_pattern()) { - markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(), - klass->prototype_header()->bias_epoch()); - o->set_mark(new_mark); - status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED; - log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread); - } - - assert(!o->mark()->has_bias_pattern() || - (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)), - "bug in bulk bias revocation"); - - return status_code; + assert(!o->mark().has_bias_pattern(), "bug in bulk bias revocation"); } -static void clean_up_cached_monitor_info() { - // Walk the thread list clearing out the cached monitors - for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) { - thr->set_cached_monitor_info(NULL); +static void clean_up_cached_monitor_info(JavaThread* thread = NULL) { + if (thread != NULL) { + thread->set_cached_monitor_info(NULL); + } else { + // Walk the thread list clearing out the cached monitors + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) { + thr->set_cached_monitor_info(NULL); + } } } -class VM_RevokeBias : public VM_Operation { -protected: +class VM_BulkRevokeBias : public VM_Operation { +private: Handle* _obj; - GrowableArray* _objs; JavaThread* _requesting_thread; - BiasedLocking::Condition _status_code; - traceid _biased_locker_id; + bool _bulk_rebias; uint64_t _safepoint_id; public: - VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) + VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread, + bool bulk_rebias) : _obj(obj) - , _objs(NULL) , _requesting_thread(requesting_thread) - , _status_code(BiasedLocking::NOT_BIASED) - , _biased_locker_id(0) - , _safepoint_id(0) {} - - VM_RevokeBias(GrowableArray* objs, JavaThread* requesting_thread) - : _obj(NULL) - , _objs(objs) - , _requesting_thread(requesting_thread) - , _status_code(BiasedLocking::NOT_BIASED) - , _biased_locker_id(0) + , _bulk_rebias(bulk_rebias) , _safepoint_id(0) {} - virtual VMOp_Type type() const { return VMOp_RevokeBias; } + virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; } + + virtual void doit() { + BiasedLocking::bulk_revoke_at_safepoint((*_obj)(), _bulk_rebias, _requesting_thread); + _safepoint_id = SafepointSynchronize::safepoint_id(); + 
clean_up_cached_monitor_info(); + } + + bool is_bulk_rebias() const { + return _bulk_rebias; + } + + uint64_t safepoint_id() const { + return _safepoint_id; + } +}; + + +class RevokeOneBias : public ThreadClosure { +protected: + Handle _obj; + JavaThread* _requesting_thread; + JavaThread* _biased_locker; + BiasedLocking::Condition _status_code; + traceid _biased_locker_id; + +public: + RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker) + : _obj(obj) + , _requesting_thread(requesting_thread) + , _biased_locker(biased_locker) + , _status_code(BiasedLocking::NOT_BIASED) + , _biased_locker_id(0) {} + + void do_thread(Thread* target) { + assert(target == _biased_locker, "Wrong thread"); - virtual bool doit_prologue() { - // Verify that there is actual work to do since the callers just - // give us locked object(s). If we don't find any biased objects - // there is nothing to do and we avoid a safepoint. - if (_obj != NULL) { - markOop mark = (*_obj)()->mark(); - if (mark->has_bias_pattern()) { - return true; + oop o = _obj(); + markWord mark = o->mark(); + + if (!mark.has_bias_pattern()) { + return; + } + + markWord prototype = o->klass()->prototype_header(); + if (!prototype.has_bias_pattern()) { + // This object has a stale bias from before the handshake + // was requested. If we fail this race, the object's bias + // has been revoked by another thread so we simply return. + markWord biased_value = mark; + mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark); + assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked"); + if (biased_value == mark) { + _status_code = BiasedLocking::BIAS_REVOKED; } - } else { - for ( int i = 0 ; i < _objs->length(); i++ ) { - markOop mark = (_objs->at(i))()->mark(); - if (mark->has_bias_pattern()) { - return true; + return; + } + + if (_biased_locker == mark.biased_locker()) { + if (mark.bias_epoch() == prototype.bias_epoch()) { + // Epoch is still valid. This means biaser could be currently + // synchronized on this object. We must walk its stack looking + // for monitor records associated with this object and change + // them to be stack locks if any are found. + ResourceMark rm; + BiasedLocking::walk_stack_and_revoke(o, _biased_locker); + _biased_locker->set_cached_monitor_info(NULL); + assert(!o->mark().has_bias_pattern(), "invariant"); + _biased_locker_id = JFR_THREAD_ID(_biased_locker); + _status_code = BiasedLocking::BIAS_REVOKED; + return; + } else { + markWord biased_value = mark; + mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark); + if (mark == biased_value || !mark.has_bias_pattern()) { + assert(!o->mark().has_bias_pattern(), "should be revoked"); + _status_code = (biased_value == mark) ? 
BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED; + return; } } } - return false; - } - virtual void doit() { - if (_obj != NULL) { - log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:"); - JavaThread* biased_locker = NULL; - _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker); - if (biased_locker != NULL) { - _biased_locker_id = JFR_THREAD_ID(biased_locker); - } - _safepoint_id = SafepointSynchronize::safepoint_counter(); - clean_up_cached_monitor_info(); - return; - } else { - log_info(biasedlocking)("Revoking bias with global safepoint:"); - BiasedLocking::revoke_at_safepoint(_objs); - } + _status_code = BiasedLocking::NOT_REVOKED; } BiasedLocking::Condition status_code() const { @@ -564,40 +578,9 @@ traceid biased_locker() const { return _biased_locker_id; } - - uint64_t safepoint_id() const { - return _safepoint_id; - } }; -class VM_BulkRevokeBias : public VM_RevokeBias { -private: - bool _bulk_rebias; - bool _attempt_rebias_of_object; - -public: - VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread, - bool bulk_rebias, - bool attempt_rebias_of_object) - : VM_RevokeBias(obj, requesting_thread) - , _bulk_rebias(bulk_rebias) - , _attempt_rebias_of_object(attempt_rebias_of_object) {} - - virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; } - virtual bool doit_prologue() { return true; } - - virtual void doit() { - _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread); - _safepoint_id = SafepointSynchronize::safepoint_counter(); - clean_up_cached_monitor_info(); - } - - bool is_bulk_rebias() const { - return _bulk_rebias; - } -}; - static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) { assert(event != NULL, "invariant"); assert(k != NULL, "invariant"); @@ -606,13 +589,13 @@ event->commit(); } -static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* op) { +static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) { assert(event != NULL, "invariant"); assert(k != NULL, "invariant"); assert(op != NULL, "invariant"); assert(event->should_commit(), "invariant"); event->set_lockClass(k); - event->set_safepointId(op->safepoint_id()); + event->set_safepointId(0); event->set_previousOwner(op->biased_locker()); event->commit(); } @@ -628,181 +611,279 @@ event->commit(); } -BiasedLocking::Condition BiasedLocking::revoke_own_locks_in_handshake(Handle obj, TRAPS) { - markOop mark = obj->mark(); - if (!mark->has_bias_pattern()) { - return NOT_BIASED; - } +BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) { - Klass *k = obj->klass(); - markOop prototype_header = k->prototype_header(); - assert(mark->biased_locker() == THREAD && - prototype_header->bias_epoch() == mark->bias_epoch(), "Revoke failed, unhandled biased lock state"); - ResourceMark rm; - log_info(biasedlocking)("Revoking bias by walking my own stack:"); - EventBiasedLockSelfRevocation event; - BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL); - ((JavaThread*) THREAD)->set_cached_monitor_info(NULL); - assert(cond == BIAS_REVOKED, "why not?"); - if (event.should_commit()) { - post_self_revocation_event(&event, k); + EventBiasedLockRevocation event; + if (PrintBiasedLockingStatistics) { + Atomic::inc(handshakes_count_addr()); } - return cond; -} - 
-BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) { - assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); + log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread " + INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester), + p2i(biaser), p2i(obj())); - // We can revoke the biases of anonymously-biased objects - // efficiently enough that we should not cause these revocations to - // update the heuristics because doing so may cause unwanted bulk - // revocations (which are expensive) to occur. - markOop mark = obj->mark(); - if (mark->is_biased_anonymously() && !attempt_rebias) { - // We are probably trying to revoke the bias of this object due to - // an identity hash code computation. Try to revoke the bias - // without a safepoint. This is possible if we can successfully - // compare-and-exchange an unbiased header into the mark word of - // the object, meaning that no other thread has raced to acquire - // the bias of the object. - markOop biased_value = mark; - markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); - markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark); - if (res_mark == biased_value) { - return BIAS_REVOKED; + RevokeOneBias revoke(obj, requester, biaser); + bool executed = Handshake::execute(&revoke, biaser); + if (revoke.status_code() == NOT_REVOKED) { + return NOT_REVOKED; + } + if (executed) { + log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked", + p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already ")); + if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) { + post_revocation_event(&event, obj->klass(), &revoke); } - } else if (mark->has_bias_pattern()) { - Klass* k = obj->klass(); - markOop prototype_header = k->prototype_header(); - if (!prototype_header->has_bias_pattern()) { - // This object has a stale bias from before the bulk revocation - // for this data type occurred. It's pointless to update the - // heuristics at this point so simply update the header with a - // CAS. If we fail this race, the object's bias has been revoked - // by another thread so we simply return and let the caller deal - // with it. - markOop biased_value = mark; - markOop res_mark = obj->cas_set_mark(prototype_header, mark); - assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked"); - return BIAS_REVOKED; - } else if (prototype_header->bias_epoch() != mark->bias_epoch()) { - // The epoch of this biasing has expired indicating that the - // object is effectively unbiased. Depending on whether we need - // to rebias or revoke the bias of this object we can do it - // efficiently enough with a CAS that we shouldn't update the - // heuristics. This is normally done in the assembly code but we - // can reach this point due to various points in the runtime - // needing to revoke biases. - if (attempt_rebias) { - assert(THREAD->is_Java_thread(), ""); - markOop biased_value = mark; - markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch()); - markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark); - if (res_mark == biased_value) { - return BIAS_REVOKED_AND_REBIASED; + assert(!obj->mark().has_bias_pattern(), "invariant"); + return revoke.status_code(); + } else { + // Thread was not alive. 
+ // Grab Threads_lock before manually trying to revoke bias. This avoids race with a newly + // created JavaThread (that happens to get the same memory address as biaser) synchronizing + // on this object. + { + MutexLocker ml(Threads_lock); + markWord mark = obj->mark(); + // Check if somebody else was able to revoke it before biased thread exited. + if (!mark.has_bias_pattern()) { + return NOT_BIASED; + } + ThreadsListHandle tlh; + markWord prototype = obj->klass()->prototype_header(); + if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() && + prototype.bias_epoch() == mark.bias_epoch())) { + obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark); + if (event.should_commit()) { + post_revocation_event(&event, obj->klass(), &revoke); } - } else { - markOop biased_value = mark; - markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); - markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark); - if (res_mark == biased_value) { - return BIAS_REVOKED; - } + assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now"); + return BIAS_REVOKED; } } } - HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias); - if (heuristics == HR_NOT_BIASED) { - return NOT_BIASED; - } else if (heuristics == HR_SINGLE_REVOKE) { - Klass *k = obj->klass(); - markOop prototype_header = k->prototype_header(); - if (mark->biased_locker() == THREAD && - prototype_header->bias_epoch() == mark->bias_epoch()) { - // A thread is trying to revoke the bias of an object biased - // toward it, again likely due to an identity hash code - // computation. We can again avoid a safepoint in this case - // since we are only going to walk our own stack. There are no - // races with revocations occurring in other threads because we - // reach no safepoints in the revocation path. - // Also check the epoch because even if threads match, another thread - // can come in with a CAS to steal the bias of an object that has a - // stale epoch. 
- ResourceMark rm; - log_info(biasedlocking)("Revoking bias by walking my own stack:"); - EventBiasedLockSelfRevocation event; - BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL); - ((JavaThread*) THREAD)->set_cached_monitor_info(NULL); - assert(cond == BIAS_REVOKED, "why not?"); - if (event.should_commit()) { - post_self_revocation_event(&event, k); - } - return cond; - } else { - EventBiasedLockRevocation event; - VM_RevokeBias revoke(&obj, (JavaThread*) THREAD); - VMThread::execute(&revoke); - if (event.should_commit() && revoke.status_code() != NOT_BIASED) { - post_revocation_event(&event, k, &revoke); - } - return revoke.status_code(); - } - } - - assert((heuristics == HR_BULK_REVOKE) || - (heuristics == HR_BULK_REBIAS), "?"); - EventBiasedLockClassRevocation event; - VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD, - (heuristics == HR_BULK_REBIAS), - attempt_rebias); - VMThread::execute(&bulk_revoke); - if (event.should_commit()) { - post_class_revocation_event(&event, obj->klass(), &bulk_revoke); - } - return bulk_revoke.status_code(); + return NOT_REVOKED; } -void BiasedLocking::revoke(GrowableArray* objs) { - assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); - if (objs->length() == 0) { +// Caller should have instantiated a ResourceMark object before calling this method +void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) { + assert(!SafepointSynchronize::is_at_safepoint() || !ThreadLocalHandshakes, + "if ThreadLocalHandshakes is enabled this should always be executed outside safepoints"); + assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread"); + + markWord mark = obj->mark(); + assert(mark.biased_locker() == biased_locker && + obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant"); + + log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark " + INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT + ", biaser " INTPTR_FORMAT " %s", + Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread", + p2i(Thread::current()), + p2i(obj), + mark.value(), + obj->klass()->external_name(), + obj->klass()->prototype_header().value(), + p2i(biased_locker), + Thread::current()->is_VM_thread() ? "" : "(walking own stack)"); + + markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age()); + + GrowableArray* cached_monitor_info = get_or_compute_monitor_info(biased_locker); + BasicLock* highest_lock = NULL; + for (int i = 0; i < cached_monitor_info->length(); i++) { + MonitorInfo* mon_info = cached_monitor_info->at(i); + if (mon_info->owner() == obj) { + log_trace(biasedlocking)(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")", + p2i(mon_info->owner()), + p2i(obj)); + // Assume recursive case and fix up highest lock below + markWord mark = markWord::encode((BasicLock*) NULL); + highest_lock = mon_info->lock(); + highest_lock->set_displaced_header(mark); + } else { + log_trace(biasedlocking)(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")", + p2i(mon_info->owner()), + p2i(obj)); + } + } + if (highest_lock != NULL) { + // Fix up highest lock to contain displaced header and point + // object at it + highest_lock->set_displaced_header(unbiased_prototype); + // Reset object header to point to displaced mark. + // Must release store the lock address for platforms without TSO + // ordering (e.g. ppc). 
+ obj->release_set_mark(markWord::encode(highest_lock)); + assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit"); + log_info(biasedlocking)(" Revoked bias of currently-locked object"); + } else { + log_info(biasedlocking)(" Revoked bias of currently-unlocked object"); + // Store the unlocked value into the object's header. + obj->set_mark(unbiased_prototype); + } + + assert(!obj->mark().has_bias_pattern(), "must not be biased"); +} + +void BiasedLocking::revoke_own_lock(Handle obj, TRAPS) { + assert(THREAD->is_Java_thread(), "must be called by a JavaThread"); + JavaThread* thread = (JavaThread*)THREAD; + + markWord mark = obj->mark(); + + if (!mark.has_bias_pattern()) { return; } - VM_RevokeBias revoke(objs, JavaThread::current()); - VMThread::execute(&revoke); + + Klass *k = obj->klass(); + assert(mark.biased_locker() == thread && + k->prototype_header().bias_epoch() == mark.bias_epoch(), "Revoke failed, unhandled biased lock state"); + ResourceMark rm; + log_info(biasedlocking)("Revoking bias by walking my own stack:"); + EventBiasedLockSelfRevocation event; + BiasedLocking::walk_stack_and_revoke(obj(), (JavaThread*) thread); + thread->set_cached_monitor_info(NULL); + assert(!obj->mark().has_bias_pattern(), "invariant"); + if (event.should_commit()) { + post_self_revocation_event(&event, k); + } +} + +void BiasedLocking::revoke(Handle obj, TRAPS) { + assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); + + while (true) { + // We can revoke the biases of anonymously-biased objects + // efficiently enough that we should not cause these revocations to + // update the heuristics because doing so may cause unwanted bulk + // revocations (which are expensive) to occur. + markWord mark = obj->mark(); + + if (!mark.has_bias_pattern()) { + return; + } + + if (mark.is_biased_anonymously()) { + // We are probably trying to revoke the bias of this object due to + // an identity hash code computation. Try to revoke the bias + // without a safepoint. This is possible if we can successfully + // compare-and-exchange an unbiased header into the mark word of + // the object, meaning that no other thread has raced to acquire + // the bias of the object. + markWord biased_value = mark; + markWord unbiased_prototype = markWord::prototype().set_age(mark.age()); + markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark); + if (res_mark == biased_value) { + return; + } + mark = res_mark; // Refresh mark with the latest value. + } else { + Klass* k = obj->klass(); + markWord prototype_header = k->prototype_header(); + if (!prototype_header.has_bias_pattern()) { + // This object has a stale bias from before the bulk revocation + // for this data type occurred. It's pointless to update the + // heuristics at this point so simply update the header with a + // CAS. If we fail this race, the object's bias has been revoked + // by another thread so we simply return and let the caller deal + // with it. + obj->cas_set_mark(prototype_header.set_age(mark.age()), mark); + assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked"); + return; + } else if (prototype_header.bias_epoch() != mark.bias_epoch()) { + // The epoch of this biasing has expired indicating that the + // object is effectively unbiased. We can revoke the bias of this + // object efficiently enough with a CAS that we shouldn't update the + // heuristics. 
This is normally done in the assembly code but we + // can reach this point due to various points in the runtime + // needing to revoke biases. + markWord res_mark; + markWord biased_value = mark; + markWord unbiased_prototype = markWord::prototype().set_age(mark.age()); + res_mark = obj->cas_set_mark(unbiased_prototype, mark); + if (res_mark == biased_value) { + return; + } + mark = res_mark; // Refresh mark with the latest value. + } + } + + HeuristicsResult heuristics = update_heuristics(obj()); + if (heuristics == HR_NOT_BIASED) { + return; + } else if (heuristics == HR_SINGLE_REVOKE) { + JavaThread *blt = mark.biased_locker(); + assert(blt != NULL, "invariant"); + if (blt == THREAD) { + // A thread is trying to revoke the bias of an object biased + // toward it, again likely due to an identity hash code + // computation. We can again avoid a safepoint/handshake in this case + // since we are only going to walk our own stack. There are no + // races with revocations occurring in other threads because we + // reach no safepoints in the revocation path. + EventBiasedLockSelfRevocation event; + ResourceMark rm; + walk_stack_and_revoke(obj(), blt); + blt->set_cached_monitor_info(NULL); + assert(!obj->mark().has_bias_pattern(), "invariant"); + if (event.should_commit()) { + post_self_revocation_event(&event, obj->klass()); + } + return; + } else { + BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt); + if (cond != NOT_REVOKED) { + return; + } + } + } else { + assert((heuristics == HR_BULK_REVOKE) || + (heuristics == HR_BULK_REBIAS), "?"); + EventBiasedLockClassRevocation event; + VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD, + (heuristics == HR_BULK_REBIAS)); + VMThread::execute(&bulk_revoke); + if (event.should_commit()) { + post_class_revocation_event(&event, obj->klass(), &bulk_revoke); + } + return; + } + } +} + +// All objects in objs should be locked by biaser +void BiasedLocking::revoke(GrowableArray* objs, JavaThread *biaser) { + bool clean_my_cache = false; + for (int i = 0; i < objs->length(); i++) { + oop obj = (objs->at(i))(); + markWord mark = obj->mark(); + if (mark.has_bias_pattern()) { + walk_stack_and_revoke(obj, biaser); + clean_my_cache = true; + } + } + if (clean_my_cache) { + clean_up_cached_monitor_info(biaser); + } } void BiasedLocking::revoke_at_safepoint(Handle h_obj) { assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); oop obj = h_obj(); - HeuristicsResult heuristics = update_heuristics(obj, false); + HeuristicsResult heuristics = update_heuristics(obj); if (heuristics == HR_SINGLE_REVOKE) { - revoke_bias(obj, false, false, NULL, NULL); + JavaThread* biased_locker = NULL; + single_revoke_at_safepoint(obj, false, NULL, &biased_locker); + if (biased_locker) { + clean_up_cached_monitor_info(biased_locker); + } } else if ((heuristics == HR_BULK_REBIAS) || (heuristics == HR_BULK_REVOKE)) { - bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); + bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL); + clean_up_cached_monitor_info(); } - clean_up_cached_monitor_info(); -} - - -void BiasedLocking::revoke_at_safepoint(GrowableArray* objs) { - assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); - int len = objs->length(); - for (int i = 0; i < len; i++) { - oop obj = (objs->at(i))(); - HeuristicsResult heuristics = update_heuristics(obj, false); - if (heuristics == HR_SINGLE_REVOKE) { - 
revoke_bias(obj, false, false, NULL, NULL);
-    } else if ((heuristics == HR_BULK_REBIAS) ||
-               (heuristics == HR_BULK_REVOKE)) {
-      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
-    }
-  }
-  clean_up_cached_monitor_info();
-}
@@ -824,7 +905,7 @@
   // monitors in a prepass and, if they are biased, preserve their
   // mark words here. This should be a relatively small set of objects
   // especially compared to the number of objects in the heap.
-  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
+  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
   _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
 
   ResourceMark rm;
@@ -842,8 +923,8 @@
         if (mon_info->owner_is_scalar_replaced()) continue;
         oop owner = mon_info->owner();
         if (owner != NULL) {
-          markOop mark = owner->mark();
-          if (mark->has_bias_pattern()) {
+          markWord mark = owner->mark();
+          if (mark.has_bias_pattern()) {
            _preserved_oop_stack->push(Handle(cur, owner));
            _preserved_mark_stack->push(mark);
          }
@@ -866,7 +947,7 @@
   int len = _preserved_oop_stack->length();
   for (int i = 0; i < len; i++) {
     Handle owner = _preserved_oop_stack->at(i);
-    markOop mark = _preserved_mark_stack->at(i);
+    markWord mark = _preserved_mark_stack->at(i);
     owner->set_mark(mark);
   }
 
@@ -882,6 +963,7 @@
 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
 int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
 int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
+int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
 int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
 int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
@@ -905,6 +987,7 @@
   tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
   tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
   tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
+  tty->print_cr("# handshakes entries: %d", _handshakes_count);
   tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
   tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
 }
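
Note on the revocation pattern used throughout the hunks above: an anonymous or stale bias is dropped by CAS-ing an unbiased prototype header, with the GC age preserved, into the object's mark word (the repeated obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark) idiom). The standalone program below is a minimal sketch of that idea under an invented, simplified bit layout; the constants, helper names, and split of the 64-bit word are illustrative assumptions, not HotSpot's actual markWord implementation.

// toy_mark_word.cpp -- illustrative only; the real layout lives in HotSpot's markWord.hpp.
#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical simplified mark word: low bits hold the lock/bias pattern, a few bits hold the GC age.
using mark_t = uint64_t;

constexpr mark_t lock_bits    = 0x3;                 // lowest two bits: lock state
constexpr mark_t biased_bit   = 0x4;                 // "biased_lock" bit
constexpr mark_t bias_pattern = biased_bit | 0x1;    // the real VM also uses 0b101 as its biased pattern
constexpr int    age_shift    = 3;
constexpr mark_t age_mask     = 0xF;

constexpr bool   has_bias_pattern(mark_t m) { return (m & (biased_bit | lock_bits)) == bias_pattern; }
constexpr mark_t age_of(mark_t m)           { return (m >> age_shift) & age_mask; }
constexpr mark_t prototype()                { return 0x1; }   // neutral, unlocked header
constexpr mark_t set_age(mark_t m, mark_t age) {
  return (m & ~(age_mask << age_shift)) | (age << age_shift);
}
constexpr mark_t anonymously_biased(mark_t age) {
  return set_age(bias_pattern, age);                 // biased, but no thread ID installed yet
}

// Mirrors the diff's obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark):
// if the CAS loses, some other thread already revoked (or re-biased) the object.
bool cas_revoke_anonymous_bias(std::atomic<mark_t>& header) {
  mark_t mark = header.load();
  if (!has_bias_pattern(mark)) return true;                    // already unbiased
  mark_t unbiased = set_age(prototype(), age_of(mark));        // keep the GC age, drop the bias
  return header.compare_exchange_strong(mark, unbiased);
}

int main() {
  std::atomic<mark_t> header{anonymously_biased(/*age=*/2)};
  std::printf("before: biased=%d\n", has_bias_pattern(header.load()));
  cas_revoke_anonymous_bias(header);
  std::printf("after:  biased=%d age=%llu\n", has_bias_pattern(header.load()),
              (unsigned long long) age_of(header.load()));
  return 0;
}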
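The headline change in this patch is that a bias held by another live thread is no longer revoked with the global-safepoint VM_RevokeBias operation: single_revoke_with_handshake() hands a RevokeOneBias closure to the bias holder via Handshake::execute(), and the holder revokes by walking its own stack (walk_stack_and_revoke). The toy program below sketches only that request/poll shape using plain standard-library threads; ToyHandshake and its methods are made-up stand-ins, not the HotSpot handshake API, and the actual stack walk is elided.

// toy_handshake_revoke.cpp -- illustrative only; real handshakes live in HotSpot's handshake.hpp.
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>

// Minimal stand-in for "execute this closure on the target thread and wait":
// the requester queues one operation; the target runs it at its next poll.
class ToyHandshake {
  std::mutex mu_;
  std::condition_variable cv_;
  std::function<void()> op_;
  bool done_ = false;
public:
  void request(std::function<void()> op) {          // called by the requesting thread
    { std::lock_guard<std::mutex> lk(mu_); op_ = std::move(op); }
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [&]{ return done_; });
  }
  void poll() {                                     // called by the target (bias-holding) thread
    std::lock_guard<std::mutex> lk(mu_);
    if (op_ && !done_) { op_(); done_ = true; cv_.notify_all(); }
  }
};

int main() {
  std::atomic<uint64_t> mark{0x5};                  // pretend "biased" header (see previous sketch)
  ToyHandshake hs;
  std::atomic<bool> stop{false};

  // The bias holder keeps polling; when the revocation closure arrives it
  // clears its own bias (standing in for walking its own stack).
  std::thread bias_holder([&]{
    while (!stop.load()) { hs.poll(); std::this_thread::yield(); }
  });

  // The requester needs no global safepoint: it hands the work to the bias
  // holder, which is the shape single_revoke_with_handshake() relies on.
  hs.request([&]{ mark.store(0x1); });              // revoke: store the unbiased prototype
  std::printf("mark after handshake: 0x%llx\n", (unsigned long long) mark.load());

  stop.store(true);
  bias_holder.join();
  return 0;
}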
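For the bulk path, bulk_revoke_at_safepoint() can invalidate every existing bias of a class at once by bumping the class's bias epoch rather than visiting each instance; only objects that are currently locked need their per-object epochs fixed up by walking thread stacks. A minimal sketch of that epoch idea, using assumed toy types (ToyKlass, ToyObject) rather than the real Klass and markWord fields:

// toy_bias_epoch.cpp -- illustrative only; the real epoch bits live in markWord and the Klass prototype header.
#include <cstdio>
#include <vector>

// Each class keeps a small "bias epoch"; each biased object records the epoch
// it was biased under. A bias is only valid while the two epochs still match.
struct ToyKlass  { unsigned epoch = 0; };
struct ToyObject { bool biased = true; unsigned epoch = 0; };

bool bias_is_valid(const ToyObject& o, const ToyKlass& k) {
  return o.biased && o.epoch == k.epoch;
}

// Bulk rebias: bump the class epoch so every existing bias of that class goes
// stale at once, without touching each object. Currently-locked objects would
// still have their epochs adjusted by a stack walk, which is elided here.
void bulk_rebias(ToyKlass& k) { ++k.epoch; }

int main() {
  ToyKlass k;
  std::vector<ToyObject> objs(3);          // all biased under epoch 0
  bulk_rebias(k);                          // one store invalidates them all
  for (const ToyObject& o : objs) {
    std::printf("bias valid: %d\n", bias_is_valid(o, k));
  }
  return 0;
}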