7024970: 2/3 assert(ServiceThread::is_service_thread(Thread::current())) failed: Service thread must post enqueue
author dcubed
Tue, 15 Mar 2011 06:37:31 -0700
changeset 8661 3dc8a18ac563
parent 8660 de3c4dedef24
child 8662 6d4f3724c1e6
7024970: 2/3 assert(ServiceThread::is_service_thread(Thread::current())) failed: Service thread must post enqueue
Summary: Change nmethod_lock() to also prevent zombification of the nmethod. CompiledMethodUnload events also need to lock the nmethod. Cleanups in nmethod::make_not_entrant_or_zombie().
Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/prims/jvmtiImpl.cpp
hotspot/src/share/vm/prims/jvmtiImpl.hpp
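
Before the per-file diffs, a note on the shape of the fix: the change makes nmethod::_lock_count guard not just flushing but also the not_entrant to zombie transition, and the deferred CompiledMethodUnload event holds such a lock until the ServiceThread has posted it. A minimal standalone sketch of that reference-count pattern (FakeNMethod and these free functions are hypothetical stand-ins, not HotSpot code):

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-in for an nmethod. While lock_count is non-zero
    // the method may be neither flushed nor moved from not_entrant to
    // zombie -- the semantics this change gives nmethod::_lock_count.
    struct FakeNMethod {
      std::atomic<int> lock_count{0};
      bool zombie = false;

      bool is_locked_by_vm() const { return lock_count.load() > 0; }

      // The sweeper's zombification step, now gated by the lock.
      bool try_make_zombie() {
        if (is_locked_by_vm()) return false;  // locked => no zombification
        zombie = true;
        return true;
      }
    };

    // Mirrors nmethodLocker::lock_nmethod(nm, zombie_ok): locking an
    // already-zombie method is legal only for the unload-event path.
    void lock_nmethod(FakeNMethod* nm, bool zombie_ok = false) {
      if (nm == NULL) return;
      nm->lock_count.fetch_add(1);
      assert(zombie_ok || !nm->zombie);
    }

    void unlock_nmethod(FakeNMethod* nm) {
      if (nm == NULL) return;
      nm->lock_count.fetch_sub(1);
    }
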
--- a/hotspot/src/share/vm/code/nmethod.cpp	Tue Mar 15 06:35:10 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Tue Mar 15 06:37:31 2011 -0700
@@ -1180,14 +1180,17 @@
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
 }
 
-// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, it is not in use by the VM,
+// and it is not in use by the ServiceThread)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
   // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
 }
 
 void nmethod::inc_decompile_count() {
@@ -1294,6 +1297,7 @@
 // Common functionality for both make_not_entrant and make_zombie
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
 
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
@@ -1301,11 +1305,6 @@
   No_Safepoint_Verifier nsv;
 
   {
-    // If the method is already zombie there is nothing to do
-    if (is_zombie()) {
-      return false;
-    }
-
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
@@ -1375,13 +1374,12 @@
       flush_dependencies(NULL);
     }
 
-    {
-      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-      // and it hasn't already been reported for this nmethod then report it now.
-      // (the event may have been reported earilier if the GC marked it for unloading).
-      Pause_No_Safepoint_Verifier pnsv(&nsv);
-      post_compiled_method_unload();
-    }
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+    // event and it hasn't already been reported for this nmethod then
+    // report it now. The event may have been reported earlier if the GC
+    // marked it for unloading. JvmtiDeferredEventQueue support means
+    // we no longer go to a safepoint here.
+    post_compiled_method_unload();
 
 #ifdef ASSERT
     // It's no longer safe to access the oops section since zombie
@@ -1566,7 +1564,7 @@
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
           _jmethod_id, insts_begin());
     if (SafepointSynchronize::is_at_safepoint()) {
       // Don't want to take the queueing lock. Add it as pending and
@@ -2171,10 +2169,12 @@
   lock_nmethod(_nm);
 }
 
-void nmethodLocker::lock_nmethod(nmethod* nm) {
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   if (nm == NULL)  return;
   Atomic::inc(&nm->_lock_count);
-  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
 
 void nmethodLocker::unlock_nmethod(nmethod* nm) {
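
In the hunks above, make_not_entrant_or_zombie() now asserts that it is never entered for a method that is already a zombie (instead of silently returning false), and post_compiled_method_unload() runs without pausing the No_Safepoint_Verifier because the event is merely queued. For context, a hedged sketch of the ordinary caller-side pattern the RAII nmethodLocker serves (patch_caller() is a hypothetical function; the locker usage matches the nmethodLocker nml(this) seen above):

    // Hypothetical caller: pins the nmethod for the scope so that no
    // safepoint inside the body can flush it or make it a zombie.
    void patch_caller(nmethod* nm) {
      nmethodLocker nml(nm);   // _lock_count++; zombies still rejected
      // ... work that may safepoint; nm stays valid and non-zombie ...
    }                          // destructor drops the lock again
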
--- a/hotspot/src/share/vm/code/nmethod.hpp	Tue Mar 15 06:35:10 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Tue Mar 15 06:37:31 2011 -0700
@@ -194,7 +194,10 @@
 
   NOT_PRODUCT(bool _has_debug_info; )
 
-  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
+  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
+  // and is not made into a zombie. However, once the nmethod is made into
+  // a zombie, it will be locked one final time if CompiledMethodUnload
+  // event processing needs to be done.
   jint  _lock_count;
 
   // not_entrant method removal. Each mark_sweep pass will update
@@ -522,8 +525,9 @@
   void flush();
 
  public:
-  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
-  // nmethod, since the VM might have a reference to it. Should only be called from a  safepoint.
+  // When true is returned, it is unsafe to remove this nmethod even if
+  // it is a zombie, since the VM or the ServiceThread might still be
+  // using it.
   bool is_locked_by_vm() const                    { return _lock_count >0; }
 
   // See comment at definition of _last_seen_on_stack
@@ -689,13 +693,20 @@
 
 };
 
-// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
+// Locks an nmethod so its code will not get removed and it will not
+// be made into a zombie, even if it is a not_entrant method. After the
+// nmethod becomes a zombie, if CompiledMethodUnload event processing
+// needs to be done, then lock_nmethod() is used directly to keep the
+// generated code from being reused too early.
 class nmethodLocker : public StackObj {
   nmethod* _nm;
 
  public:
 
-  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
+  // note: nm can be NULL
+  // Only JvmtiDeferredEvent::compiled_method_unload_event()
+  // should pass zombie_ok == true.
+  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
   static void unlock_nmethod(nmethod* nm); // (ditto)
 
   nmethodLocker(address pc); // derive nm from pc
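
The header comments above separate the two uses: the RAII form for ordinary pinning, and a single direct lock_nmethod() call with zombie_ok == true for the CompiledMethodUnload path, where the lock must outlive any stack scope. A sketch of that direct pairing as it spans the enqueue/post boundary (simplified from the jvmtiImpl.cpp hunks below):

    // Enqueue side -- runs where the nmethod has just been made a zombie:
    nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
    // ... queue the CompiledMethodUnload event for the ServiceThread ...

    // ServiceThread side -- after posting the event:
    nmethodLocker::unlock_nmethod(nm);
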
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Tue Mar 15 06:35:10 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Tue Mar 15 06:37:31 2011 -0700
@@ -919,15 +919,24 @@
     nmethod* nm) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
   event._event_data.compiled_method_load = nm;
-  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
+  // Keep the nmethod alive until the ServiceThread can process
+  // this deferred event.
+  nmethodLocker::lock_nmethod(nm);
   return event;
 }
 
 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
-    jmethodID id, const void* code) {
+    nmethod* nm, jmethodID id, const void* code) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
+  event._event_data.compiled_method_unload.nm = nm;
   event._event_data.compiled_method_unload.method_id = id;
   event._event_data.compiled_method_unload.code_begin = code;
+  // Keep the nmethod alive until the ServiceThread can process
+  // this deferred event. This will keep the memory for the
+  // generated code from being reused too early. We pass
+  // zombie_ok == true here so that our nmethod that was just
+  // made into a zombie can be locked.
+  nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   return event;
 }
 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
@@ -946,14 +955,19 @@
     case TYPE_COMPILED_METHOD_LOAD: {
       nmethod* nm = _event_data.compiled_method_load;
       JvmtiExport::post_compiled_method_load(nm);
+      // done with the deferred event so unlock the nmethod
       nmethodLocker::unlock_nmethod(nm);
       break;
     }
-    case TYPE_COMPILED_METHOD_UNLOAD:
+    case TYPE_COMPILED_METHOD_UNLOAD: {
+      nmethod* nm = _event_data.compiled_method_unload.nm;
       JvmtiExport::post_compiled_method_unload(
         _event_data.compiled_method_unload.method_id,
         _event_data.compiled_method_unload.code_begin);
+      // done with the deferred event so unlock the nmethod
+      nmethodLocker::unlock_nmethod(nm);
       break;
+    }
     case TYPE_DYNAMIC_CODE_GENERATED:
       JvmtiExport::post_dynamic_code_generated_internal(
         _event_data.dynamic_code_generated.name,
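
The factory/post pairing above pins the nmethod from event creation until the ServiceThread drains the queue, so the generated code cannot be reclaimed in between. A minimal standalone model of that handoff, assuming a plain mutex-guarded queue (the names here are hypothetical, not the real JvmtiDeferredEventQueue API):

    #include <atomic>
    #include <deque>
    #include <mutex>

    // Hypothetical event payload: just the lock count being pinned.
    struct Event { std::atomic<int>* lock_count; };

    static std::mutex q_lock;
    static std::deque<Event> q;

    // Producer -- runs where the nmethod is zombified: pin, then enqueue.
    void enqueue_unload_event(std::atomic<int>* lock_count) {
      lock_count->fetch_add(1);              // lock_nmethod(nm, zombie_ok)
      std::lock_guard<std::mutex> g(q_lock);
      q.push_back(Event{lock_count});
    }

    // Consumer -- models the ServiceThread: post outside the queue lock,
    // then drop the pin so the generated code can be reclaimed.
    void drain_deferred_events() {
      for (;;) {
        Event e;
        {
          std::lock_guard<std::mutex> g(q_lock);
          if (q.empty()) return;
          e = q.front();
          q.pop_front();
        }
        // JvmtiExport::post_compiled_method_unload(...) would run here.
        e.lock_count->fetch_sub(1);          // unlock_nmethod(nm)
      }
    }
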
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Tue Mar 15 06:35:10 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Tue Mar 15 06:37:31 2011 -0700
@@ -458,6 +458,7 @@
   union {
     nmethod* compiled_method_load;
     struct {
+      nmethod* nm;
       jmethodID method_id;
       const void* code_begin;
     } compiled_method_unload;
@@ -477,7 +478,7 @@
   // Factory methods
   static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
     KERNEL_RETURN_(JvmtiDeferredEvent());
-  static JvmtiDeferredEvent compiled_method_unload_event(
+  static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
       jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
   static JvmtiDeferredEvent dynamic_code_generated_event(
       const char* name, const void* begin, const void* end)