src/hotspot/share/code/compiledMethod.cpp
changeset 52385 5c679ec60888
parent 52384 d6dc479bcdd3
child 52405 c0c6cdea32f1
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu Nov 01 14:57:26 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Fri Nov 02 08:33:59 2018 +0100
@@ -27,6 +27,8 @@
 #include "code/compiledMethod.inline.hpp"
 #include "code/scopeDesc.hpp"
 #include "code/codeCache.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/gcBehaviours.hpp"
 #include "interpreter/bytecode.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logTag.hpp"
@@ -37,16 +39,29 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 
-CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
+                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
+                               bool caller_must_gc_arguments)
   : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
-  _mark_for_deoptimization_status(not_marked), _method(method) {
+    _mark_for_deoptimization_status(not_marked),
+    _is_unloading_state(0),
+    _method(method)
+{
   init_defaults();
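+  // Seed the cached is_unloading() state: "not unloading", current cycle.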
+  clear_unloading_state();
 }
 
-CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
-  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
-  _mark_for_deoptimization_status(not_marked), _method(method) {
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
+                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
+                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
+  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
+             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
+    _mark_for_deoptimization_status(not_marked),
+    _is_unloading_state(0),
+    _method(method)
+{
   init_defaults();
+  clear_unloading_state();
 }
 
 void CompiledMethod::init_defaults() {
@@ -54,7 +69,6 @@
   _has_method_handle_invokes  = 0;
   _lazy_critical_native       = 0;
   _has_wide_vectors           = 0;
-  _unloading_clock            = 0;
 }
 
 bool CompiledMethod::is_method_handle_return(address return_pc) {
@@ -385,26 +399,6 @@
   ic->set_to_clean();
 }
 
-unsigned char CompiledMethod::_global_unloading_clock = 0;
-
-void CompiledMethod::increase_unloading_clock() {
-  _global_unloading_clock++;
-  if (_global_unloading_clock == 0) {
-    // _nmethods are allocated with _unloading_clock == 0,
-    // so 0 is never used as a clock value.
-    _global_unloading_clock = 1;
-  }
-}
-
-void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
-  OrderAccess::release_store(&_unloading_clock, unloading_clock);
-}
-
-unsigned char CompiledMethod::unloading_clock() {
-  return OrderAccess::load_acquire(&_unloading_clock);
-}
-
-
 // static_stub_Relocations may have dangling references to
 // nmethods so trim them out here.  Otherwise it looks like
 // compiled code is maintaining a link to dead metadata.
@@ -438,84 +432,30 @@
 #endif
 }
 
-// This is called at the end of the strong tracing/marking phase of a
-// GC to unload an nmethod if it contains otherwise unreachable
-// oops.
-
-void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  address low_boundary = oops_reloc_begin();
-
-  if (do_unloading_oops(low_boundary, is_alive)) {
-    return;
-  }
-
-#if INCLUDE_JVMCI
-  if (do_unloading_jvmci()) {
-    return;
-  }
-#endif
-
-  // Cleanup exception cache and inline caches happens
-  // after all the unloaded methods are found.
-}
-
-// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
+// Clean references from 'from' (which is itself not unloaded) to the nmethod at
+// addr, if that nmethod is unloaded or must no longer be entered.
 template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
-                                         bool parallel, bool clean_all) {
+static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+                                         bool clean_all) {
-  // Ok, to lookup references to zombies here
+  // It is OK to look up references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
   if (nm != NULL) {
-    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
-      // The nmethod has not been processed yet.
-      return true;
-    }
-
-    // Clean inline caches pointing to both zombie and not_entrant methods
+    // Clean inline caches pointing to methods that are zombie, not_entrant,
+    // unloading, or no longer the most recent code for their Method
-    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
+    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
       ic->set_to_clean(from->is_alive());
-      assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
+      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
     }
   }
-
-  return false;
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
-                                         bool parallel, bool clean_all = false) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
-                                         bool parallel, bool clean_all = false) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
 }
 
-bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  ResourceMark rm;
-
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  address low_boundary = oops_reloc_begin();
+static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+                                         bool clean_all) {
+  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
+}
 
-  if (do_unloading_oops(low_boundary, is_alive)) {
-    return false;
-  }
-
-#if INCLUDE_JVMCI
-  if (do_unloading_jvmci()) {
-    return false;
-  }
-#endif
-
-  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
+static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+                                         bool clean_all) {
+  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
 }
 
 // Cleans caches in nmethods that point to either classes that are unloaded
@@ -525,29 +465,70 @@
-// nmethods are unloaded.  Return postponed=true in the parallel case for
-// inline caches found that point to nmethods that are not yet visited during
-// the do_unloading walk.
+// nmethods are unloaded.
-bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {
+void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
+  ResourceMark rm;
 
-  // Exception cache only needs to be called if unloading occurred
+  // The exception cache only needs cleaning if unloading occurred
   if (unloading_occurred) {
     clean_exception_cache();
   }
 
-  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);
+  cleanup_inline_caches_impl(unloading_occurred, /*clean_all*/false);
 
   // All static stubs need to be cleaned.
   clean_ic_stubs();
 
   // Check that the metadata embedded in the nmethod is alive
   DEBUG_ONLY(metadata_do(check_class));
+}
 
-  return postponed;
+// The IsUnloadingStruct represents a tuple of the result of
+// IsUnloadingBehaviour::is_unloading() and the unloading cycle in which that
+// result was computed. The cycle field is only two bits wide, so the value of
+// CodeCache::unloading_cycle() is assumed to stay within a two-bit range.
+struct IsUnloadingStruct {
+  unsigned int _is_unloading:1;
+  unsigned int _unloading_cycle:2;
+};
+
+// The IsUnloadingUnion allows the IsUnloadingStruct tuple to be read and
+// written through a single uint8_t, so that it can be loaded and stored
+// atomically.
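+// Only the byte-sized _value is ever accessed; the three bits of the struct
+// are assumed to be packed into that first byte on all supported ABIs.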
+union IsUnloadingUnion {
+  IsUnloadingStruct _inflated;
+  uint8_t _value;
+};
+
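+// Answers whether this nmethod has been found to be unloading in the current
+// unloading cycle. The answer is computed at most once per cycle and cached
+// in _is_unloading_state.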
+bool CompiledMethod::is_unloading() {
+  IsUnloadingUnion state;
+  state._value = RawAccess<MO_RELAXED>::load(&_is_unloading_state);
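+  // A set _is_unloading bit is sticky: an nmethod that has started unloading
+  // never becomes live again, so the bit is valid regardless of which
+  // unloading cycle recorded it.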
+  if (state._inflated._is_unloading == 1) {
+    return true;
+  }
+  if (state._inflated._unloading_cycle == CodeCache::unloading_cycle()) {
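+    // The cached answer was computed in the current cycle; _is_unloading is
+    // necessarily 0 here, as a set bit was already handled above.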
+    return state._inflated._is_unloading == 1;
+  }
+
+  // The IsUnloadingBehaviour is responsible for checking if there are any dead
+  // oops in the CompiledMethod, by calling oops_do on it.
+  bool result = IsUnloadingBehaviour::current()->is_unloading(this);
+
+  state._inflated._unloading_cycle = CodeCache::unloading_cycle();
+  state._inflated._is_unloading = result ? 1 : 0;
+
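+  // Publish the answer for subsequent queries in this cycle. Concurrent
+  // callers may race to this store, but within one cycle they are assumed to
+  // compute the same answer, so the racing relaxed stores are harmless.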
+  RawAccess<MO_RELAXED>::store(&_is_unloading_state, state._value);
+
+  return result;
+}
+
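+// Reset the cached state to "not unloading" as of the current unloading cycle.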
+void CompiledMethod::clear_unloading_state() {
+  IsUnloadingUnion state;
+  state._inflated._unloading_cycle = CodeCache::unloading_cycle();
+  state._inflated._is_unloading = 0;
+  RawAccess<MO_RELAXED>::store(&_is_unloading_state, state._value);
 }
 
 // Called to clean up after class unloading for live nmethods and from the sweeper
 // for all methods.
-bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
+void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
-  bool postponed = false;
   ResourceMark rm;
 
   // Find all calls in an nmethod and clear the ones that point to non-entrant,
@@ -564,19 +545,18 @@
         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
       }
 
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
       break;
 
     case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
       break;
 
     case relocInfo::oop_type:
-      // handled by do_unloading_oops already
       break;
 
     case relocInfo::metadata_type:
@@ -586,38 +566,6 @@
       break;
     }
   }
-
-  return postponed;
-}
-
-void CompiledMethod::do_unloading_parallel_postponed() {
-  ResourceMark rm;
-
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie(),
-         "should not call follow on zombie nmethod");
-
-  RelocIterator iter(this, oops_reloc_begin());
-  while(iter.next()) {
-
-    switch (iter.type()) {
-
-    case relocInfo::virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
-      break;
-
-    case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
-      break;
-
-    case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
-      break;
-
-    default:
-      break;
-    }
-  }
 }
 
 // Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found