src/hotspot/share/code/nmethod.cpp
changeset 55479 80b27dc96ca3
parent 55454 8892555795cd
child 55562 2f464d628942
--- a/src/hotspot/share/code/nmethod.cpp	Mon Jun 24 16:51:23 2019 -0400
+++ b/src/hotspot/share/code/nmethod.cpp	Mon Jun 24 22:38:17 2019 -0400
@@ -50,7 +50,6 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1178,7 +1177,11 @@
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    _method->unlink_code(this);
+    // OSR methods point to the Method*, but the Method* does not
+    // point back!
+    if (_method->code() == this) {
+      _method->clear_code(); // Break a cycle
+    }
   }
 
   // Make the class unloaded - i.e., change state and notify sweeper
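Note on the hunk above: an OSR nmethod keeps a pointer to its Method*, but the Method* never points back at an OSR compilation, and for a normal compilation a newer nmethod may already have been installed. Clearing Method::_code unconditionally could therefore unlink somebody else's code; the restored guard breaks the cycle only when this nmethod is still the one installed. A minimal sketch of that idea, using simplified stand-in types rather than the real HotSpot classes:

    // Sketch only: simplified stand-ins for the real Method* / nmethod classes.
    struct NMethod;

    struct Method {
      NMethod* _code = nullptr;          // the currently installed normal compilation
      NMethod* code() const { return _code; }
      void clear_code()      { _code = nullptr; }
    };

    struct NMethod {
      Method* _method = nullptr;         // back pointer, present for OSR compilations too

      void make_unloaded() {
        if (_method != nullptr) {
          // OSR methods point to the Method*, but the Method* does not point back,
          // and a newer compilation may already have replaced this one in _code.
          if (_method->code() == this) {
            _method->clear_code();       // break the cycle only if still installed
          }
        }
      }
    };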
@@ -1260,9 +1263,16 @@
   }
 }
 
-void nmethod::unlink_from_method() {
-  if (method() != NULL) {
-    method()->unlink_code(this);
+void nmethod::unlink_from_method(bool acquire_lock) {
+  // We need to check if both the _code and _from_compiled_code_entry_point
+  // refer to this nmethod because there is a race in setting these two fields
+  // in Method* as seen in bugid 4947125.
+  // If the vep() points to the zombie nmethod, the memory for the nmethod
+  // could be flushed and the compiler and vtable stubs could still call
+  // through it.
+  if (method() != NULL && (method()->code() == this ||
+                           method()->from_compiled_entry() == verified_entry_point())) {
+    method()->clear_code(acquire_lock);
   }
 }
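Note on the hunk above: unlink_from_method() regains its acquire_lock parameter because the Patching_lock is not reentrant; a caller that already holds it (make_not_entrant_or_zombie below passes false) must not take it again, while other callers rely on clear_code() to lock for them. A sketch of that contract, with std::mutex standing in for HotSpot's Patching_lock/MutexLocker and a plain pointer standing in for Method::_code:

    // Sketch only: illustrates the acquire_lock contract; names are illustrative.
    #include <mutex>

    static std::mutex patching_lock;           // stand-in for Patching_lock
    static const void* installed_code = nullptr;

    // Clears the installed code. The lock is not reentrant, so a caller that is
    // already inside the critical section must pass acquire_lock = false.
    void clear_code(bool acquire_lock) {
      if (acquire_lock) {
        std::lock_guard<std::mutex> guard(patching_lock);
        installed_code = nullptr;
      } else {
        // Caller already holds patching_lock (e.g. make_not_entrant_or_zombie).
        installed_code = nullptr;
      }
    }

In the patch itself, make_not_entrant_or_zombie() passes false precisely because it is still inside the MutexLocker scope, as the "already owns Patching_lock" comment records.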
 
@@ -1289,24 +1299,24 @@
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
+  // holding the Patching_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;
 
-  // invalidate osr nmethod before acquiring the patching lock since
-  // they both acquire leaf locks and we don't want a deadlock.
-  // This logic is equivalent to the logic below for patching the
-  // verified entry point of regular methods. We check that the
-  // nmethod is in use to ensure that it is invalidated only once.
-  if (is_osr_method() && is_in_use()) {
-    // this effectively makes the osr nmethod not entrant
-    invalidate_osr_method();
-  }
-
   {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods. We check that the
+    // nmethod is in use to ensure that it is invalidated only once.
+    if (is_osr_method() && is_in_use()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
     if (_state == state) {
       // another thread already performed this transition so nothing
@@ -1350,9 +1360,8 @@
     log_state_change();
 
     // Remove nmethod from method.
-    unlink_from_method();
-
-  } // leave critical region under CompiledMethod_lock
+    unlink_from_method(false /* already owns Patching_lock */);
+  } // leave critical region under Patching_lock
 
 #if INCLUDE_JVMCI
   // Invalidate can't occur while holding the Patching lock
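Note on the hunk above: the OSR invalidation moves back inside the outer block but stays ahead of the MutexLocker, since invalidate_osr_method() and the Patching_lock each take leaf locks and acquiring them in the wrong order could deadlock; the _state == state check then makes the transition idempotent across racing threads, and GC unregistration is deferred because it needs the CodeCache_lock. A rough sketch of that ordering, with std::mutex and a simplified state field standing in for the real HotSpot machinery:

    // Sketch only: ordering inside a make_not_entrant-style transition; names are illustrative.
    #include <mutex>

    enum State { in_use, not_entrant, zombie };

    static std::mutex patching_lock;           // stand-in for Patching_lock

    struct FakeNMethod {
      State _state = in_use;

      bool make_not_entrant() {
        // An in-use OSR nmethod is invalidated here, before taking the lock,
        // because that path acquires its own leaf lock and taking both in the
        // wrong order could deadlock.
        bool needs_unregister = false;
        {
          std::lock_guard<std::mutex> guard(patching_lock);   // critical section
          if (_state == not_entrant) {
            return false;                  // another thread already did the transition
          }
          _state = not_entrant;
          // Remember that GC unregistration is needed (in the real code this
          // depends on the state being transitioned to).
          needs_unregister = true;
          // ... patch the verified entry point, log the state change, and
          //     unlink from the Method* while still holding the lock ...
        }
        // Unregistering needs the CodeCache_lock, so it runs after the critical
        // section to avoid lock-ordering deadlocks.
        if (needs_unregister) {
          // unregister with the GC / code cache here in the real code
        }
        return true;
      }
    };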