8212989: Allow CompiledMethod ExceptionCache have unloaded klasses
author eosterlund
Mon, 05 Nov 2018 08:01:39 +0100
changeset 52405 c0c6cdea32f1
parent 52404 32408804acd4
child 52406 1a38bfb0ffc9
8212989: Allow CompiledMethod ExceptionCache have unloaded klasses Reviewed-by: kvn, pliden
src/hotspot/share/code/codeCache.cpp
src/hotspot/share/code/codeCache.hpp
src/hotspot/share/code/compiledMethod.cpp
src/hotspot/share/code/compiledMethod.hpp
src/hotspot/share/code/compiledMethod.inline.hpp
src/hotspot/share/code/nmethod.cpp
--- a/src/hotspot/share/code/codeCache.cpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/codeCache.cpp	Mon Nov 05 08:01:39 2018 +0100
@@ -145,6 +145,7 @@
 address CodeCache::_high_bound = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
+ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
 
 // Initialize arrays of CodeHeap subsets
 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
@@ -895,6 +896,34 @@
 #endif
 }
 
+// Defer freeing of concurrently cleaned ExceptionCache entries until
+// after a global handshake operation.
+void CodeCache::release_exception_cache(ExceptionCache* entry) {
+  if (SafepointSynchronize::is_at_safepoint()) {
+    delete entry;
+  } else {
+    for (;;) {
+      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
+      entry->set_purge_list_next(purge_list_head);
+      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
+        break;
+      }
+    }
+  }
+}
+
+// Delete exception caches whose concurrent unlinking has since been
+// followed by a global handshake operation.
+void CodeCache::purge_exception_caches() {
+  ExceptionCache* curr = _exception_cache_purge_list;
+  while (curr != NULL) {
+    ExceptionCache* next = curr->purge_list_next();
+    delete curr;
+    curr = next;
+  }
+  _exception_cache_purge_list = NULL;
+}
+
 void CodeCache::gc_prologue() { }
 
 void CodeCache::gc_epilogue() {
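The push in release_exception_cache() above is the classic lock-free stack insert: point the new node's next field at the observed head, then CAS the head, retrying on contention. The purge step is only safe because it runs after a global handshake, when no thread can still hold a pointer into the list. A minimal standalone sketch of the same pattern using std::atomic (the Node type and the push/purge names are illustrative, not HotSpot's):

    #include <atomic>

    struct Node {
      Node* purge_list_next = nullptr;
    };

    std::atomic<Node*> purge_list{nullptr};

    // Mirrors release_exception_cache(): link the node to the observed head,
    // then CAS; on failure, compare_exchange_weak reloads head and we retry.
    void push(Node* entry) {
      Node* head = purge_list.load(std::memory_order_relaxed);
      do {
        entry->purge_list_next = head;
      } while (!purge_list.compare_exchange_weak(head, entry,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed));
    }

    // Mirrors purge_exception_caches(): only called after a global handshake,
    // so no other thread can still observe any node on the list.
    void purge() {
      Node* curr = purge_list.exchange(nullptr, std::memory_order_acquire);
      while (curr != nullptr) {
        Node* next = curr->purge_list_next;
        delete curr;
        curr = next;
      }
    }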
--- a/src/hotspot/share/code/codeCache.hpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/codeCache.hpp	Mon Nov 05 08:01:39 2018 +0100
@@ -72,8 +72,9 @@
 // existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
 // Solaris and BSD.
 
+class ExceptionCache;
+class KlassDepChange;
 class OopClosure;
-class KlassDepChange;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -94,6 +95,8 @@
   static nmethod* _scavenge_root_nmethods;              // linked via nm->scavenge_root_link()
   static uint8_t _unloading_cycle;                      // Global state for recognizing old nmethods that need to be unloaded
 
+  static ExceptionCache* volatile _exception_cache_purge_list;
+
   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
 
@@ -192,6 +195,8 @@
   static uint16_t unloading_cycle() { return _unloading_cycle; }
   static void increment_unloading_cycle();
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
+  static void release_exception_cache(ExceptionCache* entry);
+  static void purge_exception_caches();
 
   // Apply f to every live code blob in scavengable nmethods. Prune nmethods
   // from the list of scavengable nmethods if f->fix_relocations() and a nmethod
--- a/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 05 08:01:39 2018 +0100
@@ -103,34 +103,84 @@
 
 //-----------------------------------------------------------------------------
 
+ExceptionCache* CompiledMethod::exception_cache_acquire() const {
+  return OrderAccess::load_acquire(&_exception_cache);
+}
+
 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
   assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
   assert(new_entry != NULL,"Must be non null");
   assert(new_entry->next() == NULL, "Must be null");
 
-  ExceptionCache *ec = exception_cache();
-  if (ec != NULL) {
-    new_entry->set_next(ec);
+  for (;;) {
+    ExceptionCache *ec = exception_cache();
+    if (ec != NULL) {
+      Klass* ex_klass = ec->exception_type();
+      if (!ex_klass->is_loader_alive()) {
+        // We must guarantee that entries are not inserted with new next pointer
+        // edges to ExceptionCache entries with dead klasses, due to bad interactions
+        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
+        // the head pointer forward to the first live ExceptionCache, so that new
+        // next pointers always point at live ExceptionCaches that are not removed by
+        // concurrent ExceptionCache cleanup.
+        ExceptionCache* next = ec->next();
+        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
+          CodeCache::release_exception_cache(ec);
+        }
+        continue;
+      }
+      ec = exception_cache();
+      if (ec != NULL) {
+        new_entry->set_next(ec);
+      }
+    }
+    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
+      return;
+    }
   }
-  release_set_exception_cache(new_entry);
 }
 
 void CompiledMethod::clean_exception_cache() {
+  // For each nmethod, only a single thread may call this cleanup function
+  // at the same time, whether called in STW cleanup or concurrent cleanup.
+  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
+  // then a single writer may contend with cleaning up the head pointer to the
+  // first ExceptionCache node that has a Klass* that is alive. That is fine,
+  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
+  // And the concurrent writers do not clean up next pointers, only the head.
+  // Also note that concurrent readers will walk through Klass* pointers that are not
+  // alive. That does not cause ABA problems, because Klass* is deleted after
+  // a handshake with all threads, after all stale ExceptionCaches have been
+  // unlinked. That is also when the CodeCache::_exception_cache_purge_list
+  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
+  // That similarly implies that CAS operations on ExceptionCache entries do not
+  // suffer from ABA problems as unlinking and deletion is separated by a global
+  // handshake operation.
   ExceptionCache* prev = NULL;
-  ExceptionCache* curr = exception_cache();
+  ExceptionCache* curr = exception_cache_acquire();
 
   while (curr != NULL) {
     ExceptionCache* next = curr->next();
 
-    Klass* ex_klass = curr->exception_type();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
+    if (!curr->exception_type()->is_loader_alive()) {
       if (prev == NULL) {
-        set_exception_cache(next);
+        // Try to clean the head; this is contended by concurrent inserts, which
+        // both lazily clean the head and insert entries at the head. If
+        // the CAS fails, the operation is restarted.
+        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
+          prev = NULL;
+          curr = exception_cache_acquire();
+          continue;
+        }
       } else {
+        // During cleanup it is impossible to connect the next pointer to an
+        // ExceptionCache that was not already published before a safepoint
+        // prior to the cleanup. Therefore, a release store is not required.
         prev->set_next(next);
       }
-      delete curr;
       // prev stays the same.
+
+      CodeCache::release_exception_cache(curr);
     } else {
       prev = curr;
     }
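The insert and cleanup paths above share one subtle invariant: only the head pointer is CASed by both sides, while interior next pointers are written solely by the single cleaner. A condensed model of that interplay, again with std::atomic (Entry, alive, and defer_free are stand-ins; in HotSpot the liveness test is Klass::is_loader_alive() and the deferral goes through CodeCache::release_exception_cache()):

    #include <atomic>

    struct Entry {
      bool alive = true;                     // stands in for is_loader_alive()
      std::atomic<Entry*> next{nullptr};
      Entry* purge_list_next = nullptr;
    };

    std::atomic<Entry*> cache_head{nullptr};

    // Stand-in for CodeCache::release_exception_cache(). Intentionally a
    // no-op (the model leaks): the real code pushes onto the purge list and
    // deletes only after a global handshake, never while readers may race.
    void defer_free(Entry*) {}

    // Like add_exception_cache_entry(): roll the head past a dead entry
    // first, so the new node's next edge can only point at a live entry.
    void insert(Entry* new_entry) {
      for (;;) {
        Entry* head = cache_head.load(std::memory_order_acquire);
        if (head != nullptr && !head->alive) {
          Entry* next = head->next.load(std::memory_order_relaxed);
          if (cache_head.compare_exchange_strong(head, next)) {
            defer_free(head);
          }
          continue;                          // re-examine the new head
        }
        new_entry->next.store(head, std::memory_order_relaxed);
        if (cache_head.compare_exchange_strong(head, new_entry,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) {
          return;
        }
      }
    }

    // Like clean_exception_cache(): a single cleaner unlinks dead entries.
    // Only the head CAS can be contended (by inserts); interior next
    // pointers have exactly one writer, so a plain store suffices there.
    void clean() {
      Entry* prev = nullptr;
      Entry* curr = cache_head.load(std::memory_order_acquire);
      while (curr != nullptr) {
        Entry* next = curr->next.load(std::memory_order_relaxed);
        if (!curr->alive) {
          if (prev == nullptr) {
            Entry* expected = curr;
            if (!cache_head.compare_exchange_strong(expected, next)) {
              prev = nullptr;                // lost the race: restart from head
              curr = cache_head.load(std::memory_order_acquire);
              continue;
            }
          } else {
            prev->next.store(next, std::memory_order_relaxed);
          }
          defer_free(curr);
        } else {
          prev = curr;
        }
        curr = next;
      }
    }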
@@ -145,7 +195,7 @@
   // We never grab a lock to read the exception cache, so we may
   // have false negatives. This is okay, as it can only happen during
   // the first few exception lookups for a given nmethod.
-  ExceptionCache* ec = exception_cache();
+  ExceptionCache* ec = exception_cache_acquire();
   while (ec != NULL) {
     address ret_val;
     if ((ret_val = ec->match(exception,pc)) != NULL) {
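Continuing the sketch above: lookups never take a lock either. An acquire load of the head pairs with the releasing CAS in insert(), so a reader that observes a node also observes its initialized contents; missing a just-inserted entry (the "false negative" noted in the comment) is harmless:

    // Continues the Entry/cache_head sketch above. A reader may miss an
    // entry inserted after its acquire load of the head; that only costs
    // one slow-path lookup, matching the comment in the patch.
    Entry* find_first_alive() {
      for (Entry* e = cache_head.load(std::memory_order_acquire);
           e != nullptr;
           e = e->next.load(std::memory_order_relaxed)) {
        if (e->alive) {
          return e;
        }
      }
      return nullptr;
    }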
@@ -172,13 +222,11 @@
   }
 }
 
-//-------------end of code for ExceptionCache--------------
-
 // private method for handling exception cache
 // These methods are private, and used to manipulate the exception cache
 // directly.
 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
-  ExceptionCache* ec = exception_cache();
+  ExceptionCache* ec = exception_cache_acquire();
   while (ec != NULL) {
     if (ec->match_exception_with_space(exception)) {
       return ec;
@@ -188,6 +236,8 @@
   return NULL;
 }
 
+//-------------end of code for ExceptionCache--------------
+
 bool CompiledMethod::is_at_poll_return(address pc) {
   RelocIterator iter(this, pc, pc+1);
   while (iter.next()) {
--- a/src/hotspot/share/code/compiledMethod.hpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.hpp	Mon Nov 05 08:01:39 2018 +0100
@@ -48,7 +48,8 @@
   address  _pc[cache_size];
   address  _handler[cache_size];
   volatile int _count;
-  ExceptionCache* _next;
+  ExceptionCache* volatile _next;
+  ExceptionCache* _purge_list_next;
 
   inline address pc_at(int index);
   void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
@@ -65,8 +66,10 @@
   ExceptionCache(Handle exception, address pc, address handler);
 
   Klass*    exception_type()                { return _exception_type; }
-  ExceptionCache* next()                    { return _next; }
-  void      set_next(ExceptionCache *ec)    { _next = ec; }
+  ExceptionCache* next();
+  void      set_next(ExceptionCache *ec);
+  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
+  void      set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
 
   address match(Handle exception, address pc);
   bool    match_exception_with_space(Handle exception) ;
@@ -293,11 +296,14 @@
   virtual Metadata** metadata_addr_at(int index) const = 0;
   virtual void    set_original_pc(const frame* fr, address pc) = 0;
 
+protected:
   // Exception cache support
-  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
+  // Note: _exception_cache may be read and cleaned concurrently.
   ExceptionCache* exception_cache() const         { return _exception_cache; }
+  ExceptionCache* exception_cache_acquire() const;
   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
-  void release_set_exception_cache(ExceptionCache *ec);
+
+public:
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
   void clean_exception_cache();
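The header change is worth a second look: each ExceptionCache node now carries two link fields with very different concurrency regimes. A sketch of the shape in std::atomic terms (illustrative, not HotSpot's actual declaration):

    #include <atomic>

    struct CacheNode {
      // Per-method chain: read, CASed and cleaned concurrently, hence the
      // volatile qualifier and the Atomic next()/set_next() accessors in
      // the real code.
      std::atomic<CacheNode*> next{nullptr};
      // Purge-list chain: a node is linked here only after it has been
      // unlinked from the cache, so no new reader can reach it and a
      // plain, non-volatile field suffices.
      CacheNode* purge_list_next = nullptr;
    };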
--- a/src/hotspot/share/code/compiledMethod.inline.hpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp	Mon Nov 05 08:01:39 2018 +0100
@@ -41,10 +41,6 @@
     ;
 }
 
-inline void CompiledMethod::release_set_exception_cache(ExceptionCache *ec) {
-  OrderAccess::release_store(&_exception_cache, ec);
-}
-
 // -----------------------------------------------------------------------------
 // CompiledMethod::get_deopt_original_pc
 //
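release_set_exception_cache() can be removed because the insert path now publishes through Atomic::cmpxchg(), whose default conservative ordering in HotSpot is at least as strong as the release store it replaces. The equivalence, sketched with std::atomic (slot is a stand-in for _exception_cache):

    #include <atomic>

    std::atomic<int*> slot{nullptr};

    // Old pattern: publish a fully initialized object with a release store.
    void publish_store(int* p) {
      slot.store(p, std::memory_order_release);
    }

    // New pattern: a CAS whose success ordering includes release publishes
    // just as safely, and additionally detects racing updates to the slot.
    bool publish_cas(int* expected, int* p) {
      return slot.compare_exchange_strong(expected, p,
                                          std::memory_order_release,
                                          std::memory_order_relaxed);
    }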
--- a/src/hotspot/share/code/nmethod.cpp	Mon Nov 05 10:31:47 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Mon Nov 05 08:01:39 2018 +0100
@@ -244,6 +244,7 @@
   _count = 0;
   _exception_type = exception->klass();
   _next = NULL;
+  _purge_list_next = NULL;
 
   add_address_and_handler(pc,handler);
 }
@@ -293,6 +294,14 @@
   return false;
 }
 
+ExceptionCache* ExceptionCache::next() {
+  return Atomic::load(&_next);
+}
+
+void ExceptionCache::set_next(ExceptionCache *ec) {
+  Atomic::store(ec, &_next);
+}
+
 //-----------------------------------------------------------------------------
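Taken together, the lifecycle that keeps the CAS-based scheme safe is strictly phased: unlink (possibly concurrently, via CAS), then a global handshake, then delete. Because a node's memory is never recycled before the handshake, no thread can observe one pointer value standing for two different nodes, which is exactly the ABA hazard the big comment in compiledMethod.cpp rules out. As a single-threaded timeline over the Entry sketch above (purge() as in the earlier purge-list sketch):

    // Continues the Entry/cache_head sketch; names are illustrative.
    int main() {
      Entry* e = new Entry();
      insert(e);              // published to concurrent readers
      e->alive = false;       // simulates the Klass' class loader dying
      clean();                // unlinks e; frees it only via defer_free()
      // ... global handshake with all JavaThreads happens here ...
      // purge();             // only now may e actually be deleted
      return 0;
    }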