8226705: [REDO] Deoptimize with handshakes
author rehn
Thu, 19 Sep 2019 10:52:22 +0200
changeset 58226 408c445d04e8
parent 58225 4eebb9aadbe3
child 58229 722a19a45994
8226705: [REDO] Deoptimize with handshakes Reviewed-by: eosterlund, dcubed, dlong, pchilanomate
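Every call site below follows the same substitution: the VM_Deoptimize safepoint operation (deleted from vmOperations.*) is replaced by Deoptimization::deoptimize_all_marked(), which makes the marked nmethods not entrant and then deoptimizes affected frames via a per-thread handshake instead of a global safepoint. A minimal sketch of the new call shape, using only names that appear in the hunks below (not compilable outside HotSpot):

    #include "runtime/deoptimization.hpp"

    void example_after_marking() {
      // Old: VM_Deoptimize op; VMThread::execute(&op);   // stops all threads
      // New: marked nmethods are made not entrant, then each JavaThread is
      // handshaked individually to patch its affected frames.
      Deoptimization::deoptimize_all_marked();
    }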
src/hotspot/share/aot/aotCodeHeap.cpp
src/hotspot/share/aot/aotCompiledMethod.cpp
src/hotspot/share/ci/ciEnv.cpp
src/hotspot/share/code/codeCache.cpp
src/hotspot/share/code/compiledMethod.cpp
src/hotspot/share/code/compiledMethod.hpp
src/hotspot/share/code/nmethod.cpp
src/hotspot/share/code/nmethod.hpp
src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
src/hotspot/share/gc/z/zNMethod.cpp
src/hotspot/share/jvmci/jvmciEnv.cpp
src/hotspot/share/jvmci/jvmciRuntime.cpp
src/hotspot/share/oops/instanceKlass.cpp
src/hotspot/share/oops/method.cpp
src/hotspot/share/oops/method.hpp
src/hotspot/share/prims/jvmtiEventController.cpp
src/hotspot/share/prims/methodHandles.cpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/biasedLocking.cpp
src/hotspot/share/runtime/biasedLocking.hpp
src/hotspot/share/runtime/deoptimization.cpp
src/hotspot/share/runtime/deoptimization.hpp
src/hotspot/share/runtime/mutex.hpp
src/hotspot/share/runtime/mutexLocker.cpp
src/hotspot/share/runtime/mutexLocker.hpp
src/hotspot/share/runtime/sharedRuntime.cpp
src/hotspot/share/runtime/thread.cpp
src/hotspot/share/runtime/thread.hpp
src/hotspot/share/runtime/tieredThresholdPolicy.cpp
src/hotspot/share/runtime/vmOperations.cpp
src/hotspot/share/runtime/vmOperations.hpp
src/hotspot/share/services/dtraceAttacher.cpp
test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -38,6 +38,7 @@
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/method.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"
@@ -351,7 +352,10 @@
 #ifdef TIERED
     mh->set_aot_code(aot);
 #endif
-    Method::set_code(mh, aot);
+    {
+      MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+      Method::set_code(mh, aot);
+    }
     if (PrintAOT || (PrintCompilation && PrintAOT)) {
       PauseNoSafepointVerifier pnsv(&nsv); // aot code is registered already
       aot->print_on(tty, NULL);
@@ -731,8 +735,7 @@
     }
   }
   if (marked > 0) {
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -165,7 +165,7 @@
 
   {
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
 
     if (*_state_adr == new_state) {
       // another thread already performed this transition so nothing
@@ -188,12 +188,10 @@
 #endif
 
     // Remove AOTCompiledMethod from method.
-    if (method() != NULL && (method()->code() == this ||
-                             method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code(false /* already owns Patching_lock */);
+    if (method() != NULL) {
+      method()->unlink_code(this);
     }
-  } // leave critical region under Patching_lock
+  } // leave critical region under CompiledMethod_lock
 
 
   if (TraceCreateZombies) {
@@ -208,7 +206,6 @@
 #ifdef TIERED
 bool AOTCompiledMethod::make_entrant() {
   assert(!method()->is_old(), "reviving evolved method!");
-  assert(*_state_adr != not_entrant, "%s", method()->has_aot_code() ? "has_aot_code() not cleared" : "caller didn't check has_aot_code()");
 
   // Make sure the method is not flushed in case of a safepoint in code below.
   methodHandle the_method(method());
@@ -216,9 +213,9 @@
 
   {
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
 
-    if (*_state_adr == in_use) {
+    if (*_state_adr == in_use || *_state_adr == not_entrant) {
       // another thread already performed this transition so nothing
       // to do, but return false to indicate this.
       return false;
@@ -230,7 +227,7 @@
 
     // Log the transition once
     log_state_change();
-  } // leave critical region under Patching_lock
+  } // leave critical region under CompiledMethod_lock
 
 
   if (TraceCreateZombies) {
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1072,7 +1072,10 @@
                     task()->comp_level(), method_name);
         }
         // Allow the code to be executed
-        method->set_code(method, nm);
+        MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+        if (nm->make_in_use()) {
+          method->set_code(method, nm);
+        }
       } else {
         LogTarget(Info, nmethod, install) lt;
         if (lt.is_enabled()) {
@@ -1081,9 +1084,11 @@
           lt.print("Installing osr method (%d) %s @ %d",
                     task()->comp_level(), method_name, entry_bci);
         }
-        method->method_holder()->add_osr_nmethod(nm);
+        MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+        if (nm->make_in_use()) {
+          method->method_holder()->add_osr_nmethod(nm);
+        }
       }
-      nm->make_in_use();
     }
   }  // safepoints are allowed again
 
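The install path is now guarded by CompiledMethod_lock and gated on the in_use transition: if a concurrent deoptimization already moved the nmethod past in_use, make_in_use() returns false and the nmethod is never published. A hedged sketch of the pattern shared by the ciEnv, jvmciRuntime and sharedRuntime hunks:

    MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
    if (nm->make_in_use()) {          // try_transition(in_use); fails if already not_entrant
      method->set_code(method, nm);   // publish only a live nmethod
    }
    // else a concurrent make_not_entrant() won the race and nm stays unpublished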
--- a/src/hotspot/share/code/codeCache.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/code/codeCache.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1143,28 +1143,17 @@
 
   // At least one nmethod has been marked for deoptimization
 
-  // All this already happens inside a VM_Operation, so we'll do all the work here.
-  // Stuff copied from VM_Deoptimize and modified slightly.
-
-  // We do not want any GCs to happen while we are in the middle of this VM operation
-  ResourceMark rm;
-  DeoptimizationMarker dm;
-
-  // Deoptimize all activations depending on marked nmethods
-  Deoptimization::deoptimize_dependents();
-
-  // Make the dependent methods not entrant
-  make_marked_nmethods_not_entrant();
+  Deoptimization::deoptimize_all_marked();
 }
 #endif // INCLUDE_JVMTI
 
-// Deoptimize all methods
+// Mark methods for deopt (if safe or possible).
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (!nm->method()->is_method_handle_intrinsic()) {
+    if (!nm->is_native_method()) {
       nm->mark_for_deoptimization();
     }
   }
@@ -1192,7 +1181,7 @@
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
+    if (nm->is_marked_for_deoptimization()) {
       nm->make_not_entrant();
     }
   }
@@ -1204,17 +1193,12 @@
 
   if (number_of_nmethods_with_dependencies() == 0) return;
 
-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped during the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
   KlassDepChange changes(dependee);
 
   // Compute the dependent nmethods
   if (mark_for_deoptimization(changes) > 0) {
     // At least one nmethod has been marked for deoptimization
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
@@ -1223,26 +1207,9 @@
   // --- Compile_lock is not held. However we are at a safepoint.
   assert_locked_or_safepoint(Compile_lock);
 
-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
   // Compute the dependent nmethods
   if (mark_for_deoptimization(m_h()) > 0) {
-    // At least one nmethod has been marked for deoptimization
-
-    // All this already happens inside a VM_Operation, so we'll do all the work here.
-    // Stuff copied from VM_Deoptimize and modified slightly.
-
-    // We do not want any GCs to happen while we are in the middle of this VM operation
-    ResourceMark rm;
-    DeoptimizationMarker dm;
-
-    // Deoptimize all activations depending on marked nmethods
-    Deoptimization::deoptimize_dependents();
-
-    // Make the dependent methods not entrant
-    make_marked_nmethods_not_entrant();
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -104,6 +104,13 @@
 }
 
 //-----------------------------------------------------------------------------
+void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
+}
+
+//-----------------------------------------------------------------------------
 
 ExceptionCache* CompiledMethod::exception_cache_acquire() const {
   return OrderAccess::load_acquire(&_exception_cache);
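mark_for_deoptimization, like several functions in this change, uses a "lock unless already held" idiom: MutexLocker treats a NULL mutex as a no-op, so the same function works whether or not the caller already owns CompiledMethod_lock. A sketch of the idiom in isolation:

    void guarded_update() {
      // Re-entrant by convention: no-op if this thread already holds the lock,
      // otherwise acquire it without a safepoint check.
      MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                     Mutex::_no_safepoint_check_flag);
      // ... mutate state guarded by CompiledMethod_lock ...
    }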
--- a/src/hotspot/share/code/compiledMethod.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -244,10 +244,9 @@
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  bool  is_marked_for_deoptimization() const      { return _mark_for_deoptimization_status != not_marked; }
-  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
-    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
-  }
+  bool  is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
+  void  mark_for_deoptimization(bool inc_recompile_counts = true);
+
   bool update_recompile_counts() const {
     // Update recompile counts when either the update is explicitly requested (deoptimize)
     // or the nmethod is not marked for deoptimization at all (not_marked).
--- a/src/hotspot/share/code/nmethod.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -50,6 +50,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -476,7 +477,6 @@
     debug_only(nm->verify();) // might block
 
     nm->log_new_nmethod();
-    nm->make_in_use();
   }
   return nm;
 }
@@ -1138,6 +1138,11 @@
 
 bool nmethod::try_transition(int new_state_int) {
   signed char new_state = new_state_int;
+#ifdef ASSERT
+  if (new_state != unloaded) {
+    assert_lock_strong(CompiledMethod_lock);
+  }
+#endif
   for (;;) {
     signed char old_state = Atomic::load(&_state);
     if (old_state >= new_state) {
@@ -1193,11 +1198,7 @@
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
+    _method->unlink_code(this);
   }
 
   // Make the class unloaded - i.e., change state and notify sweeper
@@ -1281,16 +1282,9 @@
   }
 }
 
-void nmethod::unlink_from_method(bool acquire_lock) {
-  // We need to check if both the _code and _from_compiled_code_entry_point
-  // refer to this nmethod because there is a race in setting these two fields
-  // in Method* as seen in bugid 4947125.
-  // If the vep() points to the zombie nmethod, the memory for the nmethod
-  // could be flushed and the compiler and vtable stubs could still call
-  // through it.
-  if (method() != NULL && (method()->code() == this ||
-                           method()->from_compiled_entry() == verified_entry_point())) {
-    method()->clear_code(acquire_lock);
+void nmethod::unlink_from_method() {
+  if (method() != NULL) {
+    method()->unlink_code(this);
   }
 }
 
@@ -1317,24 +1311,24 @@
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the Patching_lock because we need to use the CodeCache_lock. This
+  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;
 
+  // invalidate osr nmethod before acquiring CompiledMethod_lock since
+  // they both acquire leaf locks and we don't want a deadlock.
+  // This logic is equivalent to the logic below for patching the
+  // verified entry point of regular methods. We check that the
+  // nmethod is in use to ensure that it is invalidated only once.
+  if (is_osr_method() && is_in_use()) {
+    // this effectively makes the osr nmethod not entrant
+    invalidate_osr_method();
+  }
+
   {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods. We check that the
-    // nmethod is in use to ensure that it is invalidated only once.
-    if (is_osr_method() && is_in_use()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
-
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
 
     if (Atomic::load(&_state) >= state) {
       // another thread already performed this transition so nothing
@@ -1389,8 +1383,9 @@
     log_state_change();
 
     // Remove nmethod from method.
-    unlink_from_method(false /* already owns Patching_lock */);
-  } // leave critical region under Patching_lock
+    unlink_from_method();
+
+  } // leave critical region under CompiledMethod_lock
 
 #if INCLUDE_JVMCI
   // Invalidate can't occur while holding the Patching lock
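try_transition (whose loop head is visible in the hunk above) is what makes the lazy make_in_use() safe: nmethod states are ordered and only move forward, so a CAS loop both serializes racing transitions and rejects any attempt to move backwards, e.g. a late make_in_use() after a concurrent make_not_entrant(). A hedged sketch of that state machine, mirroring the loop in nmethod.cpp:

    // States (ascending): not_installed < in_use < not_entrant < zombie < unloaded.
    bool try_transition_sketch(volatile signed char* state, signed char new_state) {
      for (;;) {
        signed char old_state = Atomic::load(state);
        if (old_state >= new_state) {
          return false;                 // already there, or already further along
        }
        if (Atomic::cmpxchg(new_state, state, old_state) == old_state) {
          return true;                  // this thread performed the transition
        }
      }
    }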
--- a/src/hotspot/share/code/nmethod.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -119,7 +119,7 @@
   // used by jvmti to track if an unload event has been posted for this nmethod.
   bool _unload_reported;
 
-  // Protected by Patching_lock
+  // Protected by CompiledMethod_lock
   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
@@ -357,7 +357,9 @@
   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 #endif
 
-  void make_in_use()                              { _state = in_use; }
+  bool make_in_use() {
+    return try_transition(in_use);
+  }
   // Make the nmethod non entrant. The nmethod will continue to be
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
@@ -390,7 +392,7 @@
 
   int   comp_level() const                        { return _comp_level; }
 
-  void unlink_from_method(bool acquire_lock);
+  void unlink_from_method();
 
   // Support for oops in scopes and relocs:
   // Note: index 0 is reserved for null.
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
     // We don't need to take the lock when unlinking nmethods from
     // the Method, because it is only concurrently unlinked by
     // the entry barrier, which acquires the per nmethod lock.
-    nm->unlink_from_method(false /* acquire_lock */);
+    nm->unlink_from_method();
 
     // We can end up calling nmethods that are unloading
     // since we clear compiled ICs lazily. Returning false
--- a/src/hotspot/share/gc/z/zNMethod.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/gc/z/zNMethod.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -266,10 +266,11 @@
     // handshake separating unlink and purge.
     nm->flush_dependencies(false /* delete_immediately */);
 
-    // We don't need to take the lock when unlinking nmethods from
+    // unlink_from_method will take the CompiledMethod_lock.
+    // In this case we don't strictly need it when unlinking nmethods from
     // the Method, because it is only concurrently unlinked by
     // the entry barrier, which acquires the per nmethod lock.
-    nm->unlink_from_method(false /* acquire_lock */);
+    nm->unlink_from_method();
 
     if (nm->is_osr_method()) {
       // Invalidate the osr nmethod before the handshake. The nmethod
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -31,6 +31,7 @@
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/typeArrayOop.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "jvmci/jniAccessMark.inline.hpp"
@@ -1491,8 +1492,7 @@
     // Invalidating the HotSpotNmethod means we want the nmethod
     // to be deoptimized.
     nm->mark_for_deoptimization();
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 
   // A HotSpotNmethod instance can only reference a single nmethod
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1520,7 +1520,10 @@
                         comp_level, method_name, nm->entry_point());
             }
             // Allow the code to be executed
-            method->set_code(method, nm);
+            MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+            if (nm->make_in_use()) {
+              method->set_code(method, nm);
+            }
           } else {
             LogTarget(Info, nmethod, install) lt;
             if (lt.is_enabled()) {
@@ -1529,12 +1532,14 @@
               lt.print("Installing osr method (%d) %s @ %d",
                         comp_level, method_name, entry_bci);
             }
-            InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
+            MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+            if (nm->make_in_use()) {
+              InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
+            }
           }
         } else {
           assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == HotSpotJVMCI::resolve(nmethod_mirror), "must be");
         }
-        nm->make_in_use();
       }
       result = nm != NULL ? JVMCI::ok :JVMCI::cache_full;
     }
--- a/src/hotspot/share/oops/instanceKlass.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -2973,6 +2973,7 @@
 
 // On-stack replacement stuff
 void InstanceKlass::add_osr_nmethod(nmethod* n) {
+  assert_lock_strong(CompiledMethod_lock);
 #ifndef PRODUCT
   if (TieredCompilation) {
       nmethod * prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true);
@@ -2982,8 +2983,6 @@
 #endif
   // only one compilation can be active
   {
-    // This is a short non-blocking critical region, so the no safepoint check is ok.
-    MutexLocker ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
     assert(n->is_osr_method(), "wrong kind of nmethod");
     n->set_osr_link(osr_nmethods_head());
     set_osr_nmethods_head(n);
@@ -3008,7 +3007,8 @@
 // Remove osr nmethod from the list. Return true if found and removed.
 bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
-  MutexLocker ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
   assert(n->is_osr_method(), "wrong kind of nmethod");
   nmethod* last = NULL;
   nmethod* cur  = osr_nmethods_head();
@@ -3051,8 +3051,8 @@
 }
 
 int InstanceKlass::mark_osr_nmethods(const Method* m) {
-  // This is a short non-blocking critical region, so the no safepoint check is ok.
-  MutexLocker ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
   nmethod* osr = osr_nmethods_head();
   int found = 0;
   while (osr != NULL) {
@@ -3067,8 +3067,8 @@
 }
 
 nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
-  // This is a short non-blocking critical region, so the no safepoint check is ok.
-  MutexLocker ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
   nmethod* osr = osr_nmethods_head();
   nmethod* best = NULL;
   while (osr != NULL) {
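With OsrList_lock gone, the OSR queue is protected by CompiledMethod_lock, and add_osr_nmethod now asserts the lock instead of taking it; its callers (the install sites above) already hold it. A hedged usage sketch:

    MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
    if (nm->make_in_use()) {
      // assert_lock_strong(CompiledMethod_lock) inside add_osr_nmethod passes,
      // and the nmethod enters the OSR list only in the in_use state.
      method->method_holder()->add_osr_nmethod(nm);
    }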
--- a/src/hotspot/share/oops/method.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/oops/method.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -103,7 +103,7 @@
   // Fix and bury in Method*
   set_interpreter_entry(NULL); // sets i2i entry and from_int
   set_adapter_entry(NULL);
-  clear_code(false /* don't need a lock */); // from_c/from_i get set to c2i/i2i
+  Method::clear_code(); // from_c/from_i get set to c2i/i2i
 
   if (access_flags.is_native()) {
     clear_native_function();
@@ -825,7 +825,7 @@
   set_native_function(
     SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
     !native_bind_event_is_interesting);
-  clear_code();
+  this->unlink_code();
 }
 
 
@@ -941,8 +941,7 @@
 }
 
 // Revert to using the interpreter and clear out the nmethod
-void Method::clear_code(bool acquire_lock /* = true */) {
-  MutexLocker pl(acquire_lock ? Patching_lock : NULL, Mutex::_no_safepoint_check_flag);
+void Method::clear_code() {
   // this may be NULL if c2i adapters have not been made yet
   // Only should happen at allocate time.
   if (adapter() == NULL) {
@@ -956,6 +955,25 @@
   _code = NULL;
 }
 
+void Method::unlink_code(CompiledMethod *compare) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  // We need to check if either the _code or _from_compiled_code_entry_point
+  // refer to this nmethod because there is a race in setting these two fields
+  // in Method* as seen in bugid 4947125.
+  // If the vep() points to the zombie nmethod, the memory for the nmethod
+  // could be flushed and the compiler and vtable stubs could still call
+  // through it.
+  if (code() == compare ||
+      from_compiled_entry() == compare->verified_entry_point()) {
+    clear_code();
+  }
+}
+
+void Method::unlink_code() {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  clear_code();
+}
+
 #if INCLUDE_CDS
 // Called by class data sharing to remove any entry points (which are not shared)
 void Method::unlink_method() {
@@ -1182,7 +1200,7 @@
 
 // Install compiled code.  Instantly it can execute.
 void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
-  MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+  assert_lock_strong(CompiledMethod_lock);
   assert( code, "use clear_code to remove code" );
   assert( mh->check_code(), "" );
 
--- a/src/hotspot/share/oops/method.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/oops/method.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -463,7 +463,17 @@
   address verified_code_entry();
   bool check_code() const;      // Not inline to avoid circular ref
   CompiledMethod* volatile code() const;
-  void clear_code(bool acquire_lock = true);    // Clear out any compiled code
+
+  // Locks CompiledMethod_lock if not held.
+  void unlink_code(CompiledMethod *compare);
+  // Locks CompiledMethod_lock if not held.
+  void unlink_code();
+
+private:
+  // Either called with CompiledMethod_lock held or from constructor.
+  void clear_code();
+
+public:
   static void set_code(const methodHandle& mh, CompiledMethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {
     constMethod()->set_adapter_entry(adapter);
--- a/src/hotspot/share/prims/jvmtiEventController.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/prims/jvmtiEventController.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -32,6 +32,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiThreadState.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
@@ -239,8 +240,7 @@
       }
     }
     if (num_marked > 0) {
-      VM_Deoptimize op;
-      VMThread::execute(&op);
+      Deoptimization::deoptimize_all_marked();
     }
   }
 }
--- a/src/hotspot/share/prims/methodHandles.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/prims/methodHandles.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -42,6 +42,7 @@
 #include "oops/typeArrayOop.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/compilationPolicy.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -1109,8 +1110,7 @@
   }
   if (marked > 0) {
     // At least one nmethod has been marked for deoptimization.
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
@@ -1506,8 +1506,7 @@
     }
     if (marked > 0) {
       // At least one nmethod has been marked for deoptimization
-      VM_Deoptimize op;
-      VMThread::execute(&op);
+      Deoptimization::deoptimize_all_marked();
     }
   }
 }
--- a/src/hotspot/share/prims/whitebox.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -820,10 +820,8 @@
 WB_END
 
 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
-  MutexLocker mu(Compile_lock);
   CodeCache::mark_all_nmethods_for_deoptimization();
-  VM_Deoptimize op;
-  VMThread::execute(&op);
+  Deoptimization::deoptimize_all_marked();
 WB_END
 
 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
@@ -840,8 +838,7 @@
   }
   result += CodeCache::mark_for_deoptimization(mh());
   if (result > 0) {
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
   return result;
 WB_END
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -726,6 +726,29 @@
   assert(!obj->mark().has_bias_pattern(), "must not be biased");
 }
 
+void BiasedLocking::revoke_own_lock(Handle obj, TRAPS) {
+  assert(THREAD->is_Java_thread(), "must be called by a JavaThread");
+  JavaThread* thread = (JavaThread*)THREAD;
+
+  markWord mark = obj->mark();
+
+  if (!mark.has_bias_pattern()) {
+    return;
+  }
+
+  Klass *k = obj->klass();
+  assert(mark.biased_locker() == thread &&
+         k->prototype_header().bias_epoch() == mark.bias_epoch(), "Revoke failed, unhandled biased lock state");
+  ResourceMark rm;
+  log_info(biasedlocking)("Revoking bias by walking my own stack:");
+  EventBiasedLockSelfRevocation event;
+  BiasedLocking::walk_stack_and_revoke(obj(), (JavaThread*) thread);
+  thread->set_cached_monitor_info(NULL);
+  assert(!obj->mark().has_bias_pattern(), "invariant");
+  if (event.should_commit()) {
+    post_self_revocation_event(&event, k);
+  }
+}
 
 void BiasedLocking::revoke(Handle obj, TRAPS) {
   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
@@ -864,23 +887,6 @@
 }
 
 
-void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
-  int len = objs->length();
-  for (int i = 0; i < len; i++) {
-    oop obj = (objs->at(i))();
-    HeuristicsResult heuristics = update_heuristics(obj);
-    if (heuristics == HR_SINGLE_REVOKE) {
-      single_revoke_at_safepoint(obj, false, NULL, NULL);
-    } else if ((heuristics == HR_BULK_REBIAS) ||
-               (heuristics == HR_BULK_REVOKE)) {
-      bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
-    }
-  }
-  clean_up_cached_monitor_info();
-}
-
-
 void BiasedLocking::preserve_marks() {
   if (!UseBiasedLocking)
     return;
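revoke_own_lock carries the deopt path's bias revocation: the deoptee revokes biases only on objects locked by its own stack, so the operation needs no safepoint and no cooperation from other threads. A sketch of the caller side, mirroring revoke_from_deopt_handler in the deoptimization.cpp hunk further down:

    GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
    get_monitors_from_stack(objects_to_revoke, thread, fr, map);  // this thread's monitors only
    for (int i = 0; i < objects_to_revoke->length(); i++) {
      BiasedLocking::revoke_own_lock(objects_to_revoke->at(i), thread);
    }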
--- a/src/hotspot/share/runtime/biasedLocking.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -190,12 +190,14 @@
   // This should be called by JavaThreads to revoke the bias of an object
   static void revoke(Handle obj, TRAPS);
 
+  // This must only be called by a JavaThread to revoke the bias of an owned object.
+  static void revoke_own_lock(Handle obj, TRAPS);
+
   static void revoke_at_safepoint(Handle obj);
 
   // These are used by deoptimization to ensure that monitors on the stack
   // can be migrated
   static void revoke(GrowableArray<Handle>* objs, JavaThread *biaser);
-  static void revoke_at_safepoint(GrowableArray<Handle>* objs);
 
   static void print_counters() { _counters.print(); }
   static BiasedLockingCounters* counters() { return &_counters; }
--- a/src/hotspot/share/runtime/deoptimization.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -157,6 +157,92 @@
   return fetch_unroll_info_helper(thread, exec_mode);
 JRT_END
 
+#if COMPILER2_OR_JVMCI
+static bool eliminate_allocations(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
+                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk) {
+  bool realloc_failures = false;
+  assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
+
+  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
+
+  // The flag return_oop() indicates call sites which return oop
+  // in compiled code. Such sites include java method calls,
+  // runtime calls (for example, used to allocate new objects/arrays
+  // on slow code path) and any other calls generated in compiled code.
+  // It is not guaranteed that we can get such information here only
+  // by analyzing bytecode in deoptimized frames. This is why this flag
+  // is set during method compilation (see Compile::Process_OopMap_Node()).
+  // If the previous frame was popped or if we are dispatching an exception,
+  // we don't have an oop result.
+  bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
+  Handle return_value;
+  if (save_oop_result) {
+    // Reallocation may trigger GC. If deoptimization happened on return from
+    // call which returns oop we need to save it since it is not in oopmap.
+    oop result = deoptee.saved_oop_result(&map);
+    assert(oopDesc::is_oop_or_null(result), "must be oop");
+    return_value = Handle(thread, result);
+    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
+    if (TraceDeoptimization) {
+      ttyLocker ttyl;
+      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
+    }
+  }
+  if (objects != NULL) {
+    JRT_BLOCK
+      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
+    JRT_END
+    bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
+    Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
+#ifndef PRODUCT
+    if (TraceDeoptimization) {
+      ttyLocker ttyl;
+      tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
+      Deoptimization::print_objects(objects, realloc_failures);
+    }
+#endif
+  }
+  if (save_oop_result) {
+    // Restore result.
+    deoptee.set_saved_oop_result(&map, return_value());
+  }
+  return realloc_failures;
+}
+
+static void eliminate_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
+#ifndef PRODUCT
+  bool first = true;
+#endif
+  for (int i = 0; i < chunk->length(); i++) {
+    compiledVFrame* cvf = chunk->at(i);
+    assert (cvf->scope() != NULL,"expect only compiled java frames");
+    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
+    if (monitors->is_nonempty()) {
+      Deoptimization::relock_objects(monitors, thread, realloc_failures);
+#ifndef PRODUCT
+      if (PrintDeoptimizationDetails) {
+        ttyLocker ttyl;
+        for (int j = 0; j < monitors->length(); j++) {
+          MonitorInfo* mi = monitors->at(j);
+          if (mi->eliminated()) {
+            if (first) {
+              first = false;
+              tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
+            }
+            if (mi->owner_is_scalar_replaced()) {
+              Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
+              tty->print_cr("     failed reallocation for klass %s", k->external_name());
+            } else {
+              tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
+            }
+          }
+        }
+      }
+#endif // !PRODUCT
+    }
+  }
+}
+#endif // COMPILER2_OR_JVMCI
 
 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
@@ -201,95 +287,33 @@
   bool realloc_failures = false;
 
 #if COMPILER2_OR_JVMCI
+#if INCLUDE_JVMCI
+  bool jvmci_enabled = true;
+#else
+  bool jvmci_enabled = false;
+#endif
+
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
-#if !INCLUDE_JVMCI
-  if (DoEscapeAnalysis || EliminateNestedLocks) {
-    if (EliminateAllocations) {
-#endif // INCLUDE_JVMCI
-      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
-      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
+  if (jvmci_enabled || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateAllocations)) {
+    realloc_failures = eliminate_allocations(thread, exec_mode, cm, deoptee, map, chunk);
+  }
+#endif // COMPILER2_OR_JVMCI
+
+  // Revoke biases, done while still in Java state.
+  // No safepoints are allowed after this point.
+  revoke_from_deopt_handler(thread, deoptee, &map);
 
-      // The flag return_oop() indicates call sites which return oop
-      // in compiled code. Such sites include java method calls,
-      // runtime calls (for example, used to allocate new objects/arrays
-      // on slow code path) and any other calls generated in compiled code.
-      // It is not guaranteed that we can get such information here only
-      // by analyzing bytecode in deoptimized frames. This is why this flag
-      // is set during method compilation (see Compile::Process_OopMap_Node()).
-      // If the previous frame was popped or if we are dispatching an exception,
-      // we don't have an oop result.
-      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
-      Handle return_value;
-      if (save_oop_result) {
-        // Reallocation may trigger GC. If deoptimization happened on return from
-        // call which returns oop we need to save it since it is not in oopmap.
-        oop result = deoptee.saved_oop_result(&map);
-        assert(oopDesc::is_oop_or_null(result), "must be oop");
-        return_value = Handle(thread, result);
-        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
-        if (TraceDeoptimization) {
-          ttyLocker ttyl;
-          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
-        }
-      }
-      if (objects != NULL) {
-        JRT_BLOCK
-          realloc_failures = realloc_objects(thread, &deoptee, &map, objects, THREAD);
-        JRT_END
-        bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
-        reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
-#ifndef PRODUCT
-        if (TraceDeoptimization) {
-          ttyLocker ttyl;
-          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
-          print_objects(objects, realloc_failures);
-        }
-#endif
-      }
-      if (save_oop_result) {
-        // Restore result.
-        deoptee.set_saved_oop_result(&map, return_value());
-      }
-#if !INCLUDE_JVMCI
-    }
-    if (EliminateLocks) {
-#endif // INCLUDE_JVMCI
-#ifndef PRODUCT
-      bool first = true;
-#endif
-      for (int i = 0; i < chunk->length(); i++) {
-        compiledVFrame* cvf = chunk->at(i);
-        assert (cvf->scope() != NULL,"expect only compiled java frames");
-        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
-        if (monitors->is_nonempty()) {
-          relock_objects(monitors, thread, realloc_failures);
-#ifndef PRODUCT
-          if (PrintDeoptimizationDetails) {
-            ttyLocker ttyl;
-            for (int j = 0; j < monitors->length(); j++) {
-              MonitorInfo* mi = monitors->at(j);
-              if (mi->eliminated()) {
-                if (first) {
-                  first = false;
-                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
-                }
-                if (mi->owner_is_scalar_replaced()) {
-                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
-                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
-                } else {
-                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
-                }
-              }
-            }
-          }
-#endif // !PRODUCT
-        }
-      }
-#if !INCLUDE_JVMCI
-    }
+  // Ensure that no safepoint is taken after pointers have been stored
+  // in fields of rematerialized objects.  If a safepoint occurs from here on
+  // out the java state residing in the vframeArray will be missed.
+  // Locks may be rebiased at a safepoint.
+  NoSafepointVerifier no_safepoint;
+
+#if COMPILER2_OR_JVMCI
+  if (jvmci_enabled || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks)) {
+    eliminate_locks(thread, chunk, realloc_failures);
   }
-#endif // INCLUDE_JVMCI
 #endif // COMPILER2_OR_JVMCI
 
   ScopeDesc* trap_scope = chunk->at(0)->scope();
@@ -305,11 +329,6 @@
     guarantee(exceptionObject() != NULL, "exception oop can not be null");
   }
 
-  // Ensure that no safepoint is taken after pointers have been stored
-  // in fields of rematerialized objects.  If a safepoint occurs from here on
-  // out the java state residing in the vframeArray will be missed.
-  NoSafepointVerifier no_safepoint;
-
   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
 #if COMPILER2_OR_JVMCI
   if (realloc_failures) {
@@ -779,10 +798,33 @@
   return bt;
 JRT_END
 
+class DeoptimizeMarkedTC : public ThreadClosure {
+ public:
+  virtual void do_thread(Thread* thread) {
+    assert(thread->is_Java_thread(), "must be");
+    JavaThread* jt = (JavaThread*)thread;
+    jt->deoptimize_marked_methods();
+  }
+};
 
-int Deoptimization::deoptimize_dependents() {
-  Threads::deoptimized_wrt_marked_nmethods();
-  return 0;
+void Deoptimization::deoptimize_all_marked() {
+  ResourceMark rm;
+  DeoptimizationMarker dm;
+
+  if (SafepointSynchronize::is_at_safepoint()) {
+    DeoptimizeMarkedTC deopt;
+    // Make the dependent methods not entrant
+    CodeCache::make_marked_nmethods_not_entrant();
+    Threads::java_threads_do(&deopt);
+  } else {
+    // Make the dependent methods not entrant
+    {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      CodeCache::make_marked_nmethods_not_entrant();
+    }
+    DeoptimizeMarkedTC deopt;
+    Handshake::execute(&deopt);
+  }
 }
 
 Deoptimization::DeoptAction Deoptimization::_unloaded_action
@@ -1397,14 +1439,7 @@
   }
 }
 
-
-void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
-  if (!UseBiasedLocking) {
-    return;
-  }
-
-  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-
+static void get_monitors_from_stack(GrowableArray<Handle>* objects_to_revoke, JavaThread* thread, frame fr, RegisterMap* map) {
   // Unfortunately we don't have a RegisterMap available in most of
   // the places we want to call this routine so we need to walk the
   // stack again to update the register map.
@@ -1428,11 +1463,20 @@
     cvf = compiledVFrame::cast(cvf->sender());
   }
   collect_monitors(cvf, objects_to_revoke);
+}
 
-  if (SafepointSynchronize::is_at_safepoint()) {
-    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
-  } else {
-    BiasedLocking::revoke(objects_to_revoke, thread);
+void Deoptimization::revoke_from_deopt_handler(JavaThread* thread, frame fr, RegisterMap* map) {
+  if (!UseBiasedLocking) {
+    return;
+  }
+  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+  get_monitors_from_stack(objects_to_revoke, thread, fr, map);
+
+  int len = objects_to_revoke->length();
+  for (int i = 0; i < len; i++) {
+    oop obj = (objects_to_revoke->at(i))();
+    BiasedLocking::revoke_own_lock(objects_to_revoke->at(i), thread);
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 }
 
@@ -1464,10 +1508,6 @@
   fr.deoptimize(thread);
 }
 
-void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
-  deoptimize(thread, fr, map, Reason_constraint);
-}
-
 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
   // Deoptimize only if the frame comes from compile code.
   // Do not deoptimize the frame which is already patched
@@ -1477,11 +1517,7 @@
   }
   ResourceMark rm;
   DeoptimizationMarker dm;
-  if (UseBiasedLocking) {
-    revoke_biases_of_monitors(thread, fr, map);
-  }
   deoptimize_single_frame(thread, fr, reason);
-
 }
 
 #if INCLUDE_JVMCI
@@ -1642,9 +1678,6 @@
   {
     ResourceMark rm;
 
-    // Revoke biases of any monitors in the frame to ensure we can migrate them
-    revoke_biases_of_monitors(thread, fr, &reg_map);
-
     DeoptReason reason = trap_request_reason(trap_request);
     DeoptAction action = trap_request_action(trap_request);
 #if INCLUDE_JVMCI
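deoptimize_all_marked thus has two modes: already inside a safepoint it iterates the thread list directly, otherwise it hands a ThreadClosure to Handshake::execute, which runs the closure once per JavaThread while only that one thread is held in a handshake-safe state. A minimal sketch of a handshake operation in the DeoptimizeMarkedTC style (ExampleTC is a hypothetical name):

    class ExampleTC : public ThreadClosure {
     public:
      virtual void do_thread(Thread* thread) {
        assert(thread->is_Java_thread(), "must be");
        // Runs either on the target JavaThread itself or on the VM thread
        // acting on its behalf; other threads keep running.
      }
    };

    // ExampleTC tc;
    // Handshake::execute(&tc);   // every JavaThread, no global safepoint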
--- a/src/hotspot/share/runtime/deoptimization.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/deoptimization.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -137,13 +137,19 @@
     Unpack_LIMIT                = 4
   };
 
+  static void deoptimize_all_marked();
+
+ private:
   // Checks all compiled methods. Invalid methods are deleted and
   // corresponding activations are deoptimized.
   static int deoptimize_dependents();
 
+  // Revoke biased locks at deopt.
+  static void revoke_from_deopt_handler(JavaThread* thread, frame fr, RegisterMap* map);
+
+ public:
   // Deoptimizes a frame lazily. nmethod gets patched deopt happens on return to the frame
-  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
-  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);
+  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason = Reason_constraint);
 
 #if INCLUDE_JVMCI
   static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
@@ -154,12 +160,8 @@
   // Does the actual work for deoptimizing a single frame
   static void deoptimize_single_frame(JavaThread* thread, frame fr, DeoptReason reason);
 
-  // Helper function to revoke biases of all monitors in frame if UseBiasedLocking
-  // is enabled
-  static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map);
-
 #if COMPILER2_OR_JVMCI
-JVMCI_ONLY(public:)
+ public:
 
   // Support for restoring non-escaping objects
   static bool realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS);
--- a/src/hotspot/share/runtime/mutex.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/mutex.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -64,7 +64,7 @@
        event,
        access         = event          +   1,
        tty            = access         +   2,
-       special        = tty            +   1,
+       special        = tty            +   2,
        suspend_resume = special        +   1,
        oopstorage     = suspend_resume +   2,
        leaf           = oopstorage     +   2,
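Bumping special from tty+1 to tty+2 opens exactly one rank slot directly below special; mutexLocker.cpp below places CompiledMethod_lock at special-1, i.e. in the slot the old special occupied, leaving all other ranks unchanged. A tiny self-contained check of the arithmetic (absolute values hypothetical, only the spacing matters):

    constexpr int rank_tty             = 3;              // hypothetical base
    constexpr int rank_special_old     = rank_tty + 1;   // before this change
    constexpr int rank_special         = rank_tty + 2;   // after this change
    constexpr int rank_compiled_method = rank_special - 1;
    static_assert(rank_compiled_method == rank_special_old, "takes the old special slot");
    static_assert(rank_compiled_method > rank_tty, "still ranks above tty");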
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -39,6 +39,7 @@
 // Consider using GCC's __read_mostly.
 
 Mutex*   Patching_lock                = NULL;
+Mutex*   CompiledMethod_lock          = NULL;
 Monitor* SystemDictionary_lock        = NULL;
 Mutex*   ProtectionDomainSet_lock     = NULL;
 Mutex*   SharedDictionary_lock        = NULL;
@@ -93,7 +94,6 @@
 Monitor* Notify_lock                  = NULL;
 Mutex*   ProfilePrint_lock            = NULL;
 Mutex*   ExceptionCache_lock          = NULL;
-Mutex*   OsrList_lock                 = NULL;
 Mutex*   NMethodSweeperStats_lock     = NULL;
 #ifndef PRODUCT
 Mutex*   FullGCALot_lock              = NULL;
@@ -233,6 +233,7 @@
   def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);
 
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
+  def(CompiledMethod_lock          , PaddedMutex  , special-1,   true,  Monitor::_safepoint_check_never);
   def(Service_lock                 , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
   def(JmethodIdCreation_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always); // used for creating jmethodIDs.
 
@@ -248,7 +249,6 @@
   def(SymbolArena_lock             , PaddedMutex  , leaf+2,      true,  Monitor::_safepoint_check_never);
   def(ProfilePrint_lock            , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // serial profile printing
   def(ExceptionCache_lock          , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // serial profile printing
-  def(OsrList_lock                 , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(Debug1_lock                  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 #ifndef PRODUCT
   def(FullGCALot_lock              , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always); // a lock to make FullGCALot MT safe
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -32,6 +32,7 @@
 // Mutexes used in the VM.
 
 extern Mutex*   Patching_lock;                   // a lock used to guard code patching of compiled code
+extern Mutex*   CompiledMethod_lock;             // a lock used to guard a compiled method and OSR queues
 extern Monitor* SystemDictionary_lock;           // a lock on the system dictionary
 extern Mutex*   ProtectionDomainSet_lock;        // a lock on the pd_set list in the system dictionary
 extern Mutex*   SharedDictionary_lock;           // a lock on the CDS shared dictionary
@@ -90,7 +91,6 @@
 extern Monitor* Notify_lock;                     // a lock used to synchronize the start-up of the vm
 extern Mutex*   ProfilePrint_lock;               // a lock used to serialize the printing of profiles
 extern Mutex*   ExceptionCache_lock;             // a lock used to synchronize exception cache updates
-extern Mutex*   OsrList_lock;                    // a lock used to serialize access to OSR queues
 extern Mutex*   NMethodSweeperStats_lock;        // a lock used to serialize access to sweeper statistics
 
 #ifndef PRODUCT
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -2902,7 +2902,12 @@
       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type, critical_entry);
 
       if (nm != NULL) {
-        method->set_code(method, nm);
+        {
+          MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+          if (nm->make_in_use()) {
+            method->set_code(method, nm);
+          }
+        }
 
         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
         if (directive->PrintAssemblyOption) {
--- a/src/hotspot/share/runtime/thread.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -2894,7 +2894,7 @@
 #endif // PRODUCT
 
 
-void JavaThread::deoptimized_wrt_marked_nmethods() {
+void JavaThread::deoptimize_marked_methods() {
   if (!has_last_Java_frame()) return;
   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
   StackFrameStream fst(this, UseBiasedLocking);
@@ -2905,7 +2905,6 @@
   }
 }
 
-
 // If the caller is a NamedThread, then remember, in the current scope,
 // the given JavaThread in its _processed_thread field.
 class RememberProcessedThread: public StackObj {
@@ -4638,13 +4637,6 @@
   threads_do(&handles_closure);
 }
 
-void Threads::deoptimized_wrt_marked_nmethods() {
-  ALL_JAVA_THREADS(p) {
-    p->deoptimized_wrt_marked_nmethods();
-  }
-}
-
-
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
                                                          int count,
--- a/src/hotspot/share/runtime/thread.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/thread.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1886,7 +1886,7 @@
   void deoptimize();
   void make_zombies();
 
-  void deoptimized_wrt_marked_nmethods();
+  void deoptimize_marked_methods();
 
  public:
   // Returns the running thread as a JavaThread
--- a/src/hotspot/share/runtime/tieredThresholdPolicy.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/tieredThresholdPolicy.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -445,6 +445,7 @@
         if (mh->has_compiled_code()) {
           mh->code()->make_not_entrant();
         }
+        MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
         Method::set_code(mh, mh->aot_code());
       }
     }
--- a/src/hotspot/share/runtime/vmOperations.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/vmOperations.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -115,18 +115,6 @@
   }
 }
 
-void VM_Deoptimize::doit() {
-  // We do not want any GCs to happen while we are in the middle of this VM operation
-  ResourceMark rm;
-  DeoptimizationMarker dm;
-
-  // Deoptimize all activations depending on marked nmethods
-  Deoptimization::deoptimize_dependents();
-
-  // Make the dependent methods not entrant
-  CodeCache::make_marked_nmethods_not_entrant();
-}
-
 void VM_MarkActiveNMethods::doit() {
   NMethodSweeper::mark_active_nmethods();
 }
--- a/src/hotspot/share/runtime/vmOperations.hpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Thu Sep 19 10:52:22 2019 +0200
@@ -49,7 +49,6 @@
   template(ClearICs)                              \
   template(ForceSafepoint)                        \
   template(ForceAsyncSafepoint)                   \
-  template(Deoptimize)                            \
   template(DeoptimizeFrame)                       \
   template(DeoptimizeAll)                         \
   template(ZombieAll)                             \
@@ -318,14 +317,6 @@
   VM_GTestExecuteAtSafepoint() {}
 };
 
-class VM_Deoptimize: public VM_Operation {
- public:
-  VM_Deoptimize() {}
-  VMOp_Type type() const                        { return VMOp_Deoptimize; }
-  void doit();
-  bool allow_nested_vm_operations() const        { return true; }
-};
-
 class VM_MarkActiveNMethods: public VM_Operation {
  public:
   VM_MarkActiveNMethods() {}
--- a/src/hotspot/share/services/dtraceAttacher.cpp	Thu Sep 19 09:50:11 2019 +0200
+++ b/src/hotspot/share/services/dtraceAttacher.cpp	Thu Sep 19 10:52:22 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,23 +33,6 @@
 
 #ifdef SOLARIS
 
-class VM_DeoptimizeTheWorld : public VM_Operation {
- public:
-  VMOp_Type type() const {
-    return VMOp_DeoptimizeTheWorld;
-  }
-  void doit() {
-    CodeCache::mark_all_nmethods_for_deoptimization();
-    ResourceMark rm;
-    DeoptimizationMarker dm;
-    // Deoptimize all activations depending on marked methods
-    Deoptimization::deoptimize_dependents();
-
-    // Mark the dependent methods non entrant
-    CodeCache::make_marked_nmethods_not_entrant();
-  }
-};
-
 static void set_bool_flag(const char* name, bool value) {
   JVMFlag* flag = JVMFlag::find_flag(name);
   JVMFlag::boolAtPut(flag, &value, JVMFlag::ATTACH_ON_DEMAND);
@@ -74,8 +57,8 @@
 
   if (changed) {
     // one or more flags changed, need to deoptimize
-    VM_DeoptimizeTheWorld op;
-    VMThread::execute(&op);
+    CodeCache::mark_all_nmethods_for_deoptimization();
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
@@ -97,8 +80,8 @@
   }
   if (changed) {
     // one or more flags changed, need to deoptimize
-    VM_DeoptimizeTheWorld op;
-    VMThread::execute(&op);
+    CodeCache::mark_all_nmethods_for_deoptimization();
+    Deoptimization::deoptimize_all_marked();
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java	Thu Sep 19 10:52:22 2019 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test UnexpectedDeoptimizationAllTest
+ * @key stress
+ * @summary stressing code cache by forcing unexpected deoptimizations of all methods
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *
+ * @build sun.hotspot.WhiteBox compiler.codecache.stress.Helper compiler.codecache.stress.TestCaseImpl
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
+ *                   -XX:-SegmentedCodeCache
+ *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
+ *                   -XX:+SegmentedCodeCache
+ *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
+ */
+
+package compiler.codecache.stress;
+
+public class UnexpectedDeoptimizationAllTest implements Runnable {
+
+    public static void main(String[] args) {
+        new CodeCacheStressRunner(new UnexpectedDeoptimizationAllTest()).runTest();
+    }
+
+    @Override
+    public void run() {
+        Helper.WHITE_BOX.deoptimizeAll();
+        try {
+            Thread.sleep(10);
+        } catch (Exception e) {
+        }
+    }
+
+}