hotspot/src/share/vm/runtime/sharedRuntime.cpp
changeset 22223 82e95c562133
parent 22209 25c1e0025b51
child 22241 58579a498f3a
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Mon Dec 09 10:03:39 2013 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Dec 06 12:11:51 2013 -0800
@@ -1178,12 +1178,12 @@
   CodeBlob* caller_cb = caller_frame.cb();
   guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
   nmethod* caller_nm = caller_cb->as_nmethod_or_null();
+
   // make sure caller is not getting deoptimized
   // and removed before we are done with it.
   // CLEANUP - with lazy deopt shouldn't need this lock
   nmethodLocker caller_lock(caller_nm);
 
-
   // determine call info & receiver
   // note: a) receiver is NULL for static calls
   //       b) an exception is thrown if receiver is NULL for non-static calls
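The nmethodLocker above is an RAII guard: it pins the caller nmethod for the duration of the resolve so the sweeper cannot flush it while a raw pointer to it is live. A minimal sketch of the pattern, assuming simplified stand-in types (Nmethod and NmethodGuard here are illustrative, not the HotSpot classes):

    #include <atomic>

    // Stand-in for an nmethod: a nonzero lock count tells the sweeper
    // the blob is pinned and must not be flushed.
    struct Nmethod {
      std::atomic<int> lock_count{0};
    };

    // RAII pin in the spirit of nmethodLocker: increment on construction,
    // decrement on scope exit; NULL is tolerated, as in the real code.
    class NmethodGuard {
      Nmethod* _nm;
     public:
      explicit NmethodGuard(Nmethod* nm) : _nm(nm) {
        if (_nm != nullptr) _nm->lock_count.fetch_add(1);
      }
      ~NmethodGuard() {
        if (_nm != nullptr) _nm->lock_count.fetch_sub(1);
      }
      NmethodGuard(const NmethodGuard&) = delete;
      NmethodGuard& operator=(const NmethodGuard&) = delete;
    };

As the CLEANUP note says, lazy deoptimization may eventually make the pin unnecessary, but holding it keeps the lifetime invariant explicit.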
@@ -1198,6 +1198,11 @@
          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
 
+  // We do not patch the call site if the caller nmethod has been made non-entrant.
+  if (!caller_nm->is_in_use()) {
+    return callee_method;
+  }
+
 #ifndef PRODUCT
   // tracing/debugging/statistics
   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
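Bailing out here is safe because returning callee_method without patching just leaves the call site unresolved; the next invocation re-enters the resolve stub and retries. A sketch of the state test this relies on, assuming a simplified enum in the spirit of the nmethod in_use / not_entrant / zombie / unloaded progression (names are stand-ins, not the HotSpot fields):

    // Simplified nmethod life cycle: only in_use nmethods may receive
    // newly patched calls.
    enum class NmState { in_use, not_entrant, zombie, unloaded };

    struct NmethodSketch {
      NmState state = NmState::in_use;
      bool is_in_use() const { return state == NmState::in_use; }
    };

    // Never patch on behalf of a non-entrant caller: the nmethod will not
    // be entered again, so updating its call sites is wasted work.
    bool should_patch_call_site(const NmethodSketch& caller) {
      return caller.is_in_use();
    }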
@@ -1237,6 +1242,10 @@
   // Make sure the callee nmethod does not get deoptimized and removed before
   // we are done patching the code.
   nmethod* callee_nm = callee_method->code();
+  if (callee_nm != NULL && !callee_nm->is_in_use()) {
+    // Patch the call site to the C2I adapter if the callee nmethod has been deoptimized or unloaded.
+    callee_nm = NULL;
+  }
   nmethodLocker nl_callee(callee_nm);
 #ifdef ASSERT
   address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
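Forcing callee_nm to NULL above makes the remainder of the path behave as if the callee had no compiled code, so the site is patched toward the interpreter via the C2I adapter rather than into a dead nmethod. A hedged sketch of that destination choice (the types and the c2i_adapter field are placeholders; in HotSpot the adapter actually comes from the Method's adapter handler, not from the nmethod):

    using address = unsigned char*;

    struct CalleeSketch {
      address verified_entry;  // compiled entry; valid only while in use
      address c2i_adapter;     // compiled-to-interpreter adapter; always safe
      bool    in_use;
    };

    // A dead nmethod's entry point must never become reachable from a
    // patched call site, so fall back to the interpreter path.
    address call_destination(const CalleeSketch& callee) {
      return callee.in_use ? callee.verified_entry : callee.c2i_adapter;
    }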
@@ -1258,15 +1267,24 @@
   {
     MutexLocker ml_patch(CompiledIC_lock);
 
+    // Acquiring this lock can block for a safepoint, during which both nmethods can change state.
+
     // Now that we are ready to patch if the Method* was redefined then
     // don't update call site and let the caller retry.
-
-    if (!callee_method->is_old()) {
+    // Don't update the call site if the caller nmethod has been made
+    // non-entrant, as patching it would be a waste of time.
+    // Don't update the call site if the callee nmethod was unloaded or deoptimized.
+    // Don't update the call site if the callee nmethod was replaced by another
+    // nmethod, which can happen once multiple live nmethods per method
+    // (tiered compilation) are supported.
+    if (!callee_method->is_old() && caller_nm->is_in_use() &&
+        (callee_nm == NULL || (callee_nm->is_in_use() && (callee_method->code() == callee_nm)))) {
 #ifdef ASSERT
       // We must not try to patch to jump to an already unloaded method.
       if (dest_entry_point != 0) {
-        assert(CodeCache::find_blob(dest_entry_point) != NULL,
-               "should not unload nmethod while locked");
+        CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
+        assert((cb != NULL) && cb->is_nmethod() && (((nmethod*)cb) == callee_nm),
+               "should not call unloaded nmethod");
       }
 #endif
       if (is_virtual) {
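The new guard folds three independent checks into one condition. Restated as a standalone predicate with explicit parentheses (all types are simplified stand-ins for the HotSpot classes):

    struct Nm { bool in_use; };

    struct Meth {
      bool old_flag;
      Nm*  code;                                // current compiled code, if any
      bool is_old() const { return old_flag; }  // true after class redefinition
    };

    bool can_patch(const Meth* callee_method, const Nm* caller_nm, const Nm* callee_nm) {
      return !callee_method->is_old()           // Method* not redefined under us
          && caller_nm->in_use                  // caller not made non-entrant meanwhile
          && (callee_nm == nullptr              // patching to the C2I adapter, or ...
              || (callee_nm->in_use             // ... callee still alive and still
                  && callee_method->code == callee_nm)); // the method's current code
    }

When a compiled destination survives this guard, the strengthened assert only has to check one thing: the blob found at dest_entry_point must be exactly callee_nm.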