8226699: [BACKOUT] JDK-8221734 Deoptimize with handshakes
author dcubed
Mon, 24 Jun 2019 22:38:17 -0400
changeset 55479 80b27dc96ca3
parent 55478 ae2e53e379cb
child 55488 d3e45bd166dc
child 57424 a327727090c7
8226699: [BACKOUT] JDK-8221734 Deoptimize with handshakes Reviewed-by: dholmes, rehn, dlong
src/hotspot/share/aot/aotCodeHeap.cpp
src/hotspot/share/aot/aotCompiledMethod.cpp
src/hotspot/share/aot/aotCompiledMethod.hpp
src/hotspot/share/code/codeCache.cpp
src/hotspot/share/code/compiledMethod.hpp
src/hotspot/share/code/nmethod.cpp
src/hotspot/share/code/nmethod.hpp
src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
src/hotspot/share/gc/z/zNMethod.cpp
src/hotspot/share/jvmci/jvmciEnv.cpp
src/hotspot/share/oops/method.cpp
src/hotspot/share/oops/method.hpp
src/hotspot/share/prims/jvmtiEventController.cpp
src/hotspot/share/prims/methodHandles.cpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/biasedLocking.cpp
src/hotspot/share/runtime/biasedLocking.hpp
src/hotspot/share/runtime/deoptimization.cpp
src/hotspot/share/runtime/deoptimization.hpp
src/hotspot/share/runtime/mutex.hpp
src/hotspot/share/runtime/mutexLocker.cpp
src/hotspot/share/runtime/mutexLocker.hpp
src/hotspot/share/runtime/thread.cpp
src/hotspot/share/runtime/thread.hpp
src/hotspot/share/runtime/vmOperations.cpp
src/hotspot/share/runtime/vmOperations.hpp
src/hotspot/share/services/dtraceAttacher.cpp
test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -38,7 +38,6 @@
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/method.inline.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"
@@ -734,7 +733,8 @@
     }
   }
   if (marked > 0) {
-    Deoptimization::deoptimize_all_marked();
+    VM_Deoptimize op;
+    VMThread::execute(&op);
   }
 }
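
For reference, the VM-operation pattern that this backout restores at each former Deoptimization::deoptimize_all_marked() call site: a minimal sketch condensed from the VM_Deoptimize declaration and doit() body re-added in vmOperations.hpp/.cpp later in this changeset.

// Sketch of the restored pattern; all names come from this changeset.
class VM_Deoptimize : public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const { return VMOp_Deoptimize; }
  void doit();            // runs in the VMThread, Java threads stopped at a safepoint
  bool allow_nested_vm_operations() const { return true; }
};

// Call sites enqueue the operation and block until doit() has run:
VM_Deoptimize op;         // stack-allocated request
VMThread::execute(&op);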
 
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -165,7 +165,7 @@
 
   {
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
     if (*_state_adr == new_state) {
       // another thread already performed this transition so nothing
@@ -188,10 +188,12 @@
 #endif
 
     // Remove AOTCompiledMethod from method.
-    if (method() != NULL) {
-      method()->unlink_code(this);
+    if (method() != NULL && (method()->code() == this ||
+                             method()->from_compiled_entry() == verified_entry_point())) {
+      HandleMark hm;
+      method()->clear_code(false /* already owns Patching_lock */);
     }
-  } // leave critical region under CompiledMethod_lock
+  } // leave critical region under Patching_lock
 
 
   if (TraceCreateZombies) {
@@ -214,7 +216,7 @@
 
   {
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
     if (*_state_adr == in_use) {
       // another thread already performed this transition so nothing
@@ -228,7 +230,7 @@
 
     // Log the transition once
     log_state_change();
-  } // leave critical region under CompiledMethod_lock
+  } // leave critical region under Patching_lock
 
 
   if (TraceCreateZombies) {
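
The two state transitions above share one shape; a condensed sketch of the restored critical section (names as in this file, with new_state standing for the target state):

{
  // Patching_lock is a special-rank lock, so it is taken with
  // _no_safepoint_check_flag and must never block for a safepoint.
  MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  if (*_state_adr == new_state) {
    return false;           // another thread already performed this transition
  }
  *_state_adr = new_state;  // change the state under the lock
  log_state_change();       // log the transition exactly once
} // leave critical region under Patching_lock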
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -176,7 +176,6 @@
                                                  state() == not_used; }
   virtual bool is_alive() const { return _is_alive(); }
   virtual bool is_in_use() const { return state() == in_use; }
-  virtual bool is_not_installed() const { return state() == not_installed; }
 
   virtual bool is_unloading() { return false; }
 
--- a/src/hotspot/share/code/codeCache.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/code/codeCache.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -1142,25 +1142,28 @@
 
   // At least one nmethod has been marked for deoptimization
 
-  Deoptimization::deoptimize_all_marked();
+  // All this already happens inside a VM_Operation, so we'll do all the work here.
+  // Stuff copied from VM_Deoptimize and modified slightly.
+
+  // We do not want any GCs to happen while we are in the middle of this VM operation
+  ResourceMark rm;
+  DeoptimizationMarker dm;
+
+  // Deoptimize all activations depending on marked nmethods
+  Deoptimization::deoptimize_dependents();
+
+  // Make the dependent methods not entrant
+  make_marked_nmethods_not_entrant();
 }
 #endif // INCLUDE_JVMTI
 
-// Mark methods for deopt (if safe or possible).
+// Deoptimize all methods
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (!nm->method()->is_method_handle_intrinsic() &&
-        !nm->is_not_installed() &&
-        nm->is_in_use() &&
-        !nm->is_native_method()) {
-      // Intrinsics and native methods are never deopted. A method that is
-      // not installed yet or is not in use is not safe to deopt; the
-      // is_in_use() check covers the not_entrant and not zombie cases.
-      // Note: A not_entrant method can become a zombie at anytime if it was
-      // made not_entrant before the previous safepoint/handshake.
+    if (!nm->method()->is_method_handle_intrinsic()) {
       nm->mark_for_deoptimization();
     }
   }
@@ -1188,12 +1191,7 @@
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (nm->is_marked_for_deoptimization() && nm->is_in_use()) {
-      // only_alive_and_not_unloading() can return not_entrant nmethods.
-      // A not_entrant method can become a zombie at anytime if it was
-      // made not_entrant before the previous safepoint/handshake. The
-      // is_in_use() check covers the not_entrant and not zombie cases
-      // that have become true after the method was marked for deopt.
+    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
       nm->make_not_entrant();
     }
   }
@@ -1205,12 +1203,17 @@
 
   if (number_of_nmethods_with_dependencies() == 0) return;
 
+  // CodeCache can only be updated by a thread_in_VM and they will all be
+  // stopped during the safepoint so CodeCache will be safe to update without
+  // holding the CodeCache_lock.
+
   KlassDepChange changes(dependee);
 
   // Compute the dependent nmethods
   if (mark_for_deoptimization(changes) > 0) {
     // At least one nmethod has been marked for deoptimization
-    Deoptimization::deoptimize_all_marked();
+    VM_Deoptimize op;
+    VMThread::execute(&op);
   }
 }
 
@@ -1219,9 +1222,26 @@
   // --- Compile_lock is not held. However we are at a safepoint.
   assert_locked_or_safepoint(Compile_lock);
 
+  // CodeCache can only be updated by a thread_in_VM and they will all be
+  // stopped during the safepoint so CodeCache will be safe to update without
+  // holding the CodeCache_lock.
+
   // Compute the dependent nmethods
   if (mark_for_deoptimization(m_h()) > 0) {
-    Deoptimization::deoptimize_all_marked();
+    // At least one nmethod has been marked for deoptimization
+
+    // All this already happens inside a VM_Operation, so we'll do all the work here.
+    // Stuff copied from VM_Deoptimize and modified slightly.
+
+    // We do not want any GCs to happen while we are in the middle of this VM operation
+    ResourceMark rm;
+    DeoptimizationMarker dm;
+
+    // Deoptimize all activations depending on marked nmethods
+    Deoptimization::deoptimize_dependents();
+
+    // Make the dependent methods not entrant
+    make_marked_nmethods_not_entrant();
   }
 }
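
Both JVMTI paths above restore the same inline sequence; a condensed sketch of what runs when the caller already executes inside a VM operation (all calls as in this file):

// No VM_Deoptimize is enqueued: the caller is already at a safepoint.
ResourceMark rm;                          // no GCs during this VM operation
DeoptimizationMarker dm;
Deoptimization::deoptimize_dependents();  // deoptimize activations of marked nmethods
make_marked_nmethods_not_entrant();       // then block future entries into them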
 
--- a/src/hotspot/share/code/compiledMethod.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/code/compiledMethod.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -214,7 +214,6 @@
   };
 
   virtual bool  is_in_use() const = 0;
-  virtual bool  is_not_installed() const = 0;
   virtual int   comp_level() const = 0;
   virtual int   compile_id() const = 0;
 
--- a/src/hotspot/share/code/nmethod.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/code/nmethod.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -50,7 +50,6 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1178,7 +1177,11 @@
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    _method->unlink_code(this);
+    // OSR methods point to the Method*, but the Method* does not
+    // point back!
+    if (_method->code() == this) {
+      _method->clear_code(); // Break a cycle
+    }
   }
 
   // Make the class unloaded - i.e., change state and notify sweeper
@@ -1260,9 +1263,16 @@
   }
 }
 
-void nmethod::unlink_from_method() {
-  if (method() != NULL) {
-    method()->unlink_code(this);
+void nmethod::unlink_from_method(bool acquire_lock) {
+  // We need to check if both the _code and _from_compiled_code_entry_point
+  // refer to this nmethod because there is a race in setting these two fields
+  // in Method* as seen in bugid 4947125.
+  // If the vep() points to the zombie nmethod, the memory for the nmethod
+  // could be flushed and the compiler and vtable stubs could still call
+  // through it.
+  if (method() != NULL && (method()->code() == this ||
+                           method()->from_compiled_entry() == verified_entry_point())) {
+    method()->clear_code(acquire_lock);
   }
 }
 
@@ -1289,24 +1299,24 @@
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
+  // holding the Patching_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;
 
-  // invalidate osr nmethod before acquiring the patching lock since
-  // they both acquire leaf locks and we don't want a deadlock.
-  // This logic is equivalent to the logic below for patching the
-  // verified entry point of regular methods. We check that the
-  // nmethod is in use to ensure that it is invalidated only once.
-  if (is_osr_method() && is_in_use()) {
-    // this effectively makes the osr nmethod not entrant
-    invalidate_osr_method();
-  }
-
   {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods. We check that the
+    // nmethod is in use to ensure that it is invalidated only once.
+    if (is_osr_method() && is_in_use()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
     if (_state == state) {
       // another thread already performed this transition so nothing
@@ -1350,9 +1360,8 @@
     log_state_change();
 
     // Remove nmethod from method.
-    unlink_from_method();
-
-  } // leave critical region under CompiledMethod_lock
+    unlink_from_method(false /* already owns Patching_lock */);
+  } // leave critical region under Patching_lock
 
 #if INCLUDE_JVMCI
   // Invalidate can't occur while holding the Patching lock
--- a/src/hotspot/share/code/nmethod.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/code/nmethod.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -119,7 +119,7 @@
   // used by jvmti to track if an unload event has been posted for this nmethod.
   bool _unload_reported;
 
-  // Protected by CompiledMethod_lock
+  // Protected by Patching_lock
   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
@@ -387,7 +387,7 @@
 
   int   comp_level() const                        { return _comp_level; }
 
-  void unlink_from_method();
+  void unlink_from_method(bool acquire_lock);
 
   // Support for oops in scopes and relocs:
   // Note: index 0 is reserved for null.
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
     // We don't need to take the lock when unlinking nmethods from
     // the Method, because it is only concurrently unlinked by
     // the entry barrier, which acquires the per nmethod lock.
-    nm->unlink_from_method();
+    nm->unlink_from_method(false /* acquire_lock */);
 
     // We can end up calling nmethods that are unloading
     // since we clear compiled ICs lazily. Returning false
--- a/src/hotspot/share/gc/z/zNMethod.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/gc/z/zNMethod.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -285,7 +285,7 @@
       // We don't need to take the lock when unlinking nmethods from
       // the Method, because it is only concurrently unlinked by
       // the entry barrier, which acquires the per nmethod lock.
-      nm->unlink_from_method();
+      nm->unlink_from_method(false /* acquire_lock */);
       return;
     }
 
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -31,7 +31,6 @@
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/typeArrayOop.inline.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "jvmci/jniAccessMark.inline.hpp"
@@ -1493,7 +1492,8 @@
     // Invalidating the HotSpotNmethod means we want the nmethod
     // to be deoptimized.
     nm->mark_for_deoptimization();
-    Deoptimization::deoptimize_all_marked();
+    VM_Deoptimize op;
+    VMThread::execute(&op);
   }
 
   // A HotSpotNmethod instance can only reference a single nmethod
--- a/src/hotspot/share/oops/method.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/oops/method.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -103,7 +103,7 @@
   // Fix and bury in Method*
   set_interpreter_entry(NULL); // sets i2i entry and from_int
   set_adapter_entry(NULL);
-  Method::clear_code(); // from_c/from_i get set to c2i/i2i
+  clear_code(false /* don't need a lock */); // from_c/from_i get set to c2i/i2i
 
   if (access_flags.is_native()) {
     clear_native_function();
@@ -819,7 +819,7 @@
   set_native_function(
     SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
     !native_bind_event_is_interesting);
-  this->unlink_code();
+  clear_code();
 }
 
 address Method::critical_native_function() {
@@ -943,7 +943,8 @@
 }
 
 // Revert to using the interpreter and clear out the nmethod
-void Method::clear_code() {
+void Method::clear_code(bool acquire_lock /* = true */) {
+  MutexLocker pl(acquire_lock ? Patching_lock : NULL, Mutex::_no_safepoint_check_flag);
   // this may be NULL if c2i adapters have not been made yet
   // Only should happen at allocate time.
   if (adapter() == NULL) {
@@ -957,25 +958,6 @@
   _code = NULL;
 }
 
-void Method::unlink_code(CompiledMethod *compare) {
-  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
-  // We need to check if either the _code or _from_compiled_code_entry_point
-  // refer to this nmethod because there is a race in setting these two fields
-  // in Method* as seen in bugid 4947125.
-  // If the vep() points to the zombie nmethod, the memory for the nmethod
-  // could be flushed and the compiler and vtable stubs could still call
-  // through it.
-  if (code() == compare ||
-      from_compiled_entry() == compare->verified_entry_point()) {
-    clear_code();
-  }
-}
-
-void Method::unlink_code() {
-  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
-  clear_code();
-}
-
 #if INCLUDE_CDS
 // Called by class data sharing to remove any entry points (which are not shared)
 void Method::unlink_method() {
@@ -1202,7 +1184,7 @@
 
 // Install compiled code.  Instantly it can execute.
 void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
-  MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
   assert( code, "use clear_code to remove code" );
   assert( mh->check_code(), "" );
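
The restored clear_code(bool acquire_lock) serves both locked and unlocked callers; a short usage sketch drawn from the call sites in this changeset:

// Caller already inside a Patching_lock critical section (see
// aotCompiledMethod.cpp and nmethod.cpp above):
method()->clear_code(false /* already owns Patching_lock */);

// Caller without the lock uses the default: clear_code then hands
// Patching_lock to MutexLocker (passing NULL instead makes the locker a no-op).
method()->clear_code();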
 
--- a/src/hotspot/share/oops/method.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/oops/method.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -463,17 +463,7 @@
   address verified_code_entry();
   bool check_code() const;      // Not inline to avoid circular ref
   CompiledMethod* volatile code() const;
-
-  // Locks CompiledMethod_lock if not held.
-  void unlink_code(CompiledMethod *compare);
-  // Locks CompiledMethod_lock if not held.
-  void unlink_code();
-
-private:
-  // Either called with CompiledMethod_lock held or from constructor.
-  void clear_code();
-
-public:
+  void clear_code(bool acquire_lock = true);    // Clear out any compiled code
   static void set_code(const methodHandle& mh, CompiledMethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {
     constMethod()->set_adapter_entry(adapter);
--- a/src/hotspot/share/prims/jvmtiEventController.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiEventController.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -32,7 +32,6 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiThreadState.inline.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
@@ -240,7 +239,8 @@
       }
     }
     if (num_marked > 0) {
-      Deoptimization::deoptimize_all_marked();
+      VM_Deoptimize op;
+      VMThread::execute(&op);
     }
   }
 }
--- a/src/hotspot/share/prims/methodHandles.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/prims/methodHandles.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -42,7 +42,6 @@
 #include "oops/typeArrayOop.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/compilationPolicy.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -1110,7 +1109,8 @@
   }
   if (marked > 0) {
     // At least one nmethod has been marked for deoptimization.
-    Deoptimization::deoptimize_all_marked();
+    VM_Deoptimize op;
+    VMThread::execute(&op);
   }
 }
 
@@ -1506,7 +1506,8 @@
     }
     if (marked > 0) {
       // At least one nmethod has been marked for deoptimization
-      Deoptimization::deoptimize_all_marked();
+      VM_Deoptimize op;
+      VMThread::execute(&op);
     }
   }
 }
--- a/src/hotspot/share/prims/whitebox.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/prims/whitebox.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -822,8 +822,10 @@
 WB_END
 
 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
+  MutexLocker mu(Compile_lock);
   CodeCache::mark_all_nmethods_for_deoptimization();
-  Deoptimization::deoptimize_all_marked();
+  VM_Deoptimize op;
+  VMThread::execute(&op);
 WB_END
 
 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
@@ -840,7 +842,8 @@
   }
   result += CodeCache::mark_for_deoptimization(mh());
   if (result > 0) {
-    Deoptimization::deoptimize_all_marked();
+    VM_Deoptimize op;
+    VMThread::execute(&op);
   }
   return result;
 WB_END
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -628,29 +628,6 @@
   event->commit();
 }
 
-BiasedLocking::Condition BiasedLocking::revoke_own_locks_in_handshake(Handle obj, TRAPS) {
-  markOop mark = obj->mark();
-
-  if (!mark->has_bias_pattern()) {
-    return NOT_BIASED;
-  }
-
-  Klass *k = obj->klass();
-  markOop prototype_header = k->prototype_header();
-  assert(mark->biased_locker() == THREAD &&
-         prototype_header->bias_epoch() == mark->bias_epoch(), "Revoke failed, unhandled biased lock state");
-  ResourceMark rm;
-  log_info(biasedlocking)("Revoking bias by walking my own stack:");
-  EventBiasedLockSelfRevocation event;
-  BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
-  ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
-  assert(cond == BIAS_REVOKED, "why not?");
-  if (event.should_commit()) {
-    post_self_revocation_event(&event, k);
-  }
-  return cond;
-}
-
 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
 
--- a/src/hotspot/share/runtime/biasedLocking.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/biasedLocking.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -175,7 +175,6 @@
 
   // This should be called by JavaThreads to revoke the bias of an object
   static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);
-  static Condition revoke_own_locks_in_handshake(Handle obj, TRAPS);
 
   // These do not allow rebiasing; they are used by deoptimization to
   // ensure that monitors on the stack can be migrated
--- a/src/hotspot/share/runtime/deoptimization.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -779,35 +779,10 @@
   return bt;
 JRT_END
 
-class DeoptimizeMarkedTC : public ThreadClosure {
-  bool _in_handshake;
- public:
-  DeoptimizeMarkedTC(bool in_handshake) : _in_handshake(in_handshake) {}
-  virtual void do_thread(Thread* thread) {
-    assert(thread->is_Java_thread(), "must be");
-    JavaThread* jt = (JavaThread*)thread;
-    jt->deoptimize_marked_methods(_in_handshake);
-  }
-};
 
-void Deoptimization::deoptimize_all_marked() {
-  ResourceMark rm;
-  DeoptimizationMarker dm;
-
-  if (SafepointSynchronize::is_at_safepoint()) {
-    DeoptimizeMarkedTC deopt(false);
-    // Make the dependent methods not entrant
-    CodeCache::make_marked_nmethods_not_entrant();
-    Threads::java_threads_do(&deopt);
-  } else {
-    // Make the dependent methods not entrant
-    {
-      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      CodeCache::make_marked_nmethods_not_entrant();
-    }
-    DeoptimizeMarkedTC deopt(true);
-    Handshake::execute(&deopt);
-  }
+int Deoptimization::deoptimize_dependents() {
+  Threads::deoptimized_wrt_marked_nmethods();
+  return 0;
 }
 
 Deoptimization::DeoptAction Deoptimization::_unloaded_action
@@ -1412,7 +1387,14 @@
   }
 }
 
-static void get_monitors_from_stack(GrowableArray<Handle>* objects_to_revoke, JavaThread* thread, frame fr, RegisterMap* map) {
+
+void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
+  if (!UseBiasedLocking) {
+    return;
+  }
+
+  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+
   // Unfortunately we don't have a RegisterMap available in most of
   // the places we want to call this routine so we need to walk the
   // stack again to update the register map.
@@ -1436,14 +1418,6 @@
     cvf = compiledVFrame::cast(cvf->sender());
   }
   collect_monitors(cvf, objects_to_revoke);
-}
-
-void Deoptimization::revoke_using_safepoint(JavaThread* thread, frame fr, RegisterMap* map) {
-  if (!UseBiasedLocking) {
-    return;
-  }
-  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-  get_monitors_from_stack(objects_to_revoke, thread, fr, map);
 
   if (SafepointSynchronize::is_at_safepoint()) {
     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
@@ -1452,21 +1426,6 @@
   }
 }
 
-void Deoptimization::revoke_using_handshake(JavaThread* thread, frame fr, RegisterMap* map) {
-  if (!UseBiasedLocking) {
-    return;
-  }
-  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-  get_monitors_from_stack(objects_to_revoke, thread, fr, map);
-
-  int len = objects_to_revoke->length();
-  for (int i = 0; i < len; i++) {
-    oop obj = (objects_to_revoke->at(i))();
-    BiasedLocking::revoke_own_locks_in_handshake(objects_to_revoke->at(i), thread);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-}
-
 
 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
   assert(fr.can_be_deoptimized(), "checking frame type");
@@ -1495,16 +1454,11 @@
   fr.deoptimize(thread);
 }
 
-void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, bool in_handshake) {
-  deopt_thread(in_handshake, thread, fr, map, Reason_constraint);
+void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
+  deoptimize(thread, fr, map, Reason_constraint);
 }
 
 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
-  deopt_thread(false, thread, fr, map, reason);
-}
-
-void Deoptimization::deopt_thread(bool in_handshake, JavaThread* thread,
-                                  frame fr, RegisterMap *map, DeoptReason reason) {
   // Deoptimize only if the frame comes from compile code.
   // Do not deoptimize the frame which is already patched
   // during the execution of the loops below.
@@ -1514,11 +1468,7 @@
   ResourceMark rm;
   DeoptimizationMarker dm;
   if (UseBiasedLocking) {
-    if (in_handshake) {
-      revoke_using_handshake(thread, fr, map);
-    } else {
-      revoke_using_safepoint(thread, fr, map);
-    }
+    revoke_biases_of_monitors(thread, fr, map);
   }
   deoptimize_single_frame(thread, fr, reason);
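
Taken together, the restored code replaces the handshake-based variant removed above with a safepoint-only flow; a condensed sketch of the path from VM_Deoptimize::doit() down to the per-thread stack walk (all names from this changeset):

int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();  // see thread.cpp below
  return 0;
}

// ... which iterates ALL_JAVA_THREADS and, per thread, deoptimizes every
// compiled frame whose nmethod was marked:
//   StackFrameStream fst(this, UseBiasedLocking);
//   for (; !fst.is_done(); fst.next())
//     if (fst.current()->should_be_deoptimized())
//       Deoptimization::deoptimize(this, *fst.current(), fst.register_map());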
 
--- a/src/hotspot/share/runtime/deoptimization.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/deoptimization.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -137,19 +137,12 @@
     Unpack_LIMIT                = 4
   };
 
-  static void deoptimize_all_marked();
-
- private:
   // Checks all compiled methods. Invalid methods are deleted and
   // corresponding activations are deoptimized.
   static int deoptimize_dependents();
-  static void revoke_using_handshake(JavaThread* thread, frame fr, RegisterMap* map);
-  static void revoke_using_safepoint(JavaThread* thread, frame fr, RegisterMap* map);
-  static void deopt_thread(bool in_handshake, JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason);
 
- public:
   // Deoptimizes a frame lazily. nmethod gets patched deopt happens on return to the frame
-  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *map, bool in_handshake = false);
+  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);
 
 #if INCLUDE_JVMCI
@@ -163,9 +156,7 @@
 
   // Helper function to revoke biases of all monitors in frame if UseBiasedLocking
   // is enabled
-  static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
-    revoke_using_safepoint(thread, fr, map);
-  }
+  static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map);
 
 #if COMPILER2_OR_JVMCI
 JVMCI_ONLY(public:)
--- a/src/hotspot/share/runtime/mutex.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/mutex.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -62,7 +62,7 @@
        event,
        access         = event          +   1,
        tty            = access         +   2,
-       special        = tty            +   2,
+       special        = tty            +   1,
        suspend_resume = special        +   1,
        vmweak         = suspend_resume +   2,
        leaf           = vmweak         +   2,
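
A quick check of the rank arithmetic, assuming event is the enum's first value (0), which the excerpt suggests but does not show:

// access = 0 + 1 = 1; tty = 1 + 2 = 3
// before the backout: special = tty + 2 = 5 (leaving slot 4 for special-1 locks)
// after the backout:  special = tty + 1 = 4 (the gap is gone)
// The special-1 slot existed only for CompiledMethod_lock (removed) and
// OsrList_lock (moved back to leaf rank); see mutexLocker.cpp below.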
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -39,7 +39,6 @@
 // Consider using GCC's __read_mostly.
 
 Mutex*   Patching_lock                = NULL;
-Mutex*   CompiledMethod_lock          = NULL;
 Monitor* SystemDictionary_lock        = NULL;
 Mutex*   ProtectionDomainSet_lock     = NULL;
 Mutex*   SharedDictionary_lock        = NULL;
@@ -262,8 +261,6 @@
   def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);
 
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
-  def(OsrList_lock                 , PaddedMutex  , special-1,   true,  Monitor::_safepoint_check_never);
-  def(CompiledMethod_lock          , PaddedMutex  , special-1,   true,  Monitor::_safepoint_check_never);
   def(Service_lock                 , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
   def(JmethodIdCreation_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always); // used for creating jmethodIDs.
 
@@ -279,6 +276,7 @@
   def(SymbolArena_lock             , PaddedMutex  , leaf+2,      true,  Monitor::_safepoint_check_never);
   def(ProfilePrint_lock            , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // serial profile printing
   def(ExceptionCache_lock          , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // serial profile printing
+  def(OsrList_lock                 , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(Debug1_lock                  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 #ifndef PRODUCT
   def(FullGCALot_lock              , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // a lock to make FullGCALot MT safe
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -32,7 +32,6 @@
 // Mutexes used in the VM.
 
 extern Mutex*   Patching_lock;                   // a lock used to guard code patching of compiled code
-extern Mutex*   CompiledMethod_lock;             // a lock used to guard a compiled method
 extern Monitor* SystemDictionary_lock;           // a lock on the system dictionary
 extern Mutex*   ProtectionDomainSet_lock;        // a lock on the pd_set list in the system dictionary
 extern Mutex*   SharedDictionary_lock;           // a lock on the CDS shared dictionary
--- a/src/hotspot/share/runtime/thread.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/thread.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -2903,17 +2903,18 @@
 #endif // PRODUCT
 
 
-void JavaThread::deoptimize_marked_methods(bool in_handshake) {
+void JavaThread::deoptimized_wrt_marked_nmethods() {
   if (!has_last_Java_frame()) return;
   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
   StackFrameStream fst(this, UseBiasedLocking);
   for (; !fst.is_done(); fst.next()) {
     if (fst.current()->should_be_deoptimized()) {
-      Deoptimization::deoptimize(this, *fst.current(), fst.register_map(), in_handshake);
+      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
     }
   }
 }
 
+
 // If the caller is a NamedThread, then remember, in the current scope,
 // the given JavaThread in its _processed_thread field.
 class RememberProcessedThread: public StackObj {
@@ -4652,6 +4653,13 @@
   threads_do(&handles_closure);
 }
 
+void Threads::deoptimized_wrt_marked_nmethods() {
+  ALL_JAVA_THREADS(p) {
+    p->deoptimized_wrt_marked_nmethods();
+  }
+}
+
+
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
                                                          int count,
--- a/src/hotspot/share/runtime/thread.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/thread.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -1923,7 +1923,7 @@
   void deoptimize();
   void make_zombies();
 
-  void deoptimize_marked_methods(bool in_handshake);
+  void deoptimized_wrt_marked_nmethods();
 
  public:
   // Returns the running thread as a JavaThread
--- a/src/hotspot/share/runtime/vmOperations.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/vmOperations.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -118,6 +118,18 @@
   }
 }
 
+void VM_Deoptimize::doit() {
+  // We do not want any GCs to happen while we are in the middle of this VM operation
+  ResourceMark rm;
+  DeoptimizationMarker dm;
+
+  // Deoptimize all activations depending on marked nmethods
+  Deoptimization::deoptimize_dependents();
+
+  // Make the dependent methods not entrant
+  CodeCache::make_marked_nmethods_not_entrant();
+}
+
 void VM_MarkActiveNMethods::doit() {
   NMethodSweeper::mark_active_nmethods();
 }
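
VM_Deoptimize::doit() pairs with the nested-execution allowance declared in vmOperations.hpp below; a minimal sketch of why that flag matters, where the outer operation is hypothetical and stands in for callers that already execute inside a VM operation:

void SomeOuterOperation::doit() {  // hypothetical outer VM operation
  // ... mark nmethods for deoptimization ...
  VM_Deoptimize op;
  VMThread::execute(&op);  // accepted mid-operation only because
                           // allow_nested_vm_operations() returns true
}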
--- a/src/hotspot/share/runtime/vmOperations.hpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Mon Jun 24 22:38:17 2019 -0400
@@ -49,6 +49,7 @@
   template(ClearICs)                              \
   template(ForceSafepoint)                        \
   template(ForceAsyncSafepoint)                   \
+  template(Deoptimize)                            \
   template(DeoptimizeFrame)                       \
   template(DeoptimizeAll)                         \
   template(ZombieAll)                             \
@@ -318,6 +319,14 @@
   VM_GTestExecuteAtSafepoint() {}
 };
 
+class VM_Deoptimize: public VM_Operation {
+ public:
+  VM_Deoptimize() {}
+  VMOp_Type type() const                        { return VMOp_Deoptimize; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
 class VM_MarkActiveNMethods: public VM_Operation {
  public:
   VM_MarkActiveNMethods() {}
--- a/src/hotspot/share/services/dtraceAttacher.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/services/dtraceAttacher.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,23 @@
 
 #ifdef SOLARIS
 
+class VM_DeoptimizeTheWorld : public VM_Operation {
+ public:
+  VMOp_Type type() const {
+    return VMOp_DeoptimizeTheWorld;
+  }
+  void doit() {
+    CodeCache::mark_all_nmethods_for_deoptimization();
+    ResourceMark rm;
+    DeoptimizationMarker dm;
+    // Deoptimize all activations depending on marked methods
+    Deoptimization::deoptimize_dependents();
+
+    // Mark the dependent methods non entrant
+    CodeCache::make_marked_nmethods_not_entrant();
+  }
+};
+
 static void set_bool_flag(const char* flag, bool value) {
   JVMFlag::boolAtPut((char*)flag, strlen(flag), &value,
                               JVMFlag::ATTACH_ON_DEMAND);
@@ -57,8 +74,8 @@
 
   if (changed) {
     // one or more flags changed, need to deoptimize
-    CodeCache::mark_all_nmethods_for_deoptimization();
-    Deoptimization::deoptimize_all_marked();
+    VM_DeoptimizeTheWorld op;
+    VMThread::execute(&op);
   }
 }
 
@@ -80,8 +97,8 @@
   }
   if (changed) {
     // one or more flags changed, need to deoptimize
-    CodeCache::mark_all_nmethods_for_deoptimization();
-    Deoptimization::deoptimize_all_marked();
+    VM_DeoptimizeTheWorld op;
+    VMThread::execute(&op);
   }
 }
 
--- a/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java	Mon Jun 24 16:51:23 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test UnexpectedDeoptimizationAllTest
- * @key stress
- * @summary stressing code cache by forcing unexpected deoptimizations of all methods
- * @library /test/lib /
- * @modules java.base/jdk.internal.misc
- *          java.management
- *
- * @build sun.hotspot.WhiteBox compiler.codecache.stress.Helper compiler.codecache.stress.TestCaseImpl
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *                                sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
- *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
- *                   -XX:-SegmentedCodeCache
- *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
- *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
- *                   -XX:+SegmentedCodeCache
- *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
- */
-
-package compiler.codecache.stress;
-
-public class UnexpectedDeoptimizationAllTest implements Runnable {
-
-    public static void main(String[] args) {
-        new CodeCacheStressRunner(new UnexpectedDeoptimizationAllTest()).runTest();
-    }
-
-    @Override
-    public void run() {
-        Helper.WHITE_BOX.deoptimizeAll();
-        try {
-            Thread.sleep(10);
-        } catch (Exception e) {
-        }
-    }
-
-}