8023191: OSR nmethods should be flushed to free space in CodeCache
author thartmann
Fri, 18 Mar 2016 09:32:29 +0100
changeset 36802 18b1db5a7e70
parent 36800 37014ee7264c
child 36803 a0e7d10f4437
8023191: OSR nmethods should be flushed to free space in CodeCache
Summary: Treat OSR nmethods like normal nmethods and flush them if they are cold/unused.
Reviewed-by: kvn
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/oops/instanceKlass.cpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/runtime/sweeper.cpp
hotspot/src/share/vm/runtime/sweeper.hpp
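
Before this change, possibly_flush() skipped OSR nmethods entirely, so cold OSR code could only leave the CodeCache when its holder class was unloaded. With the change, the sweeper treats OSR nmethods like any other nmethod: their hotness counter is decremented and, once cold, they are made not-entrant and eventually flushed. The following standalone sketch (hypothetical FakeNMethod type and a placeholder threshold, not the HotSpot implementation) illustrates the cold-detection step:

#include <cstdio>

// Hypothetical stand-in for an nmethod; field names are illustrative only.
struct FakeNMethod {
  bool is_osr;
  bool is_native;
  bool locked_by_vm;
  bool not_entrant;
  int  hotness;        // reset on use, decremented once per sweep
};

// Sketch of possibly_flush() after this changeset: OSR nmethods are no longer
// excluded, only native and VM-locked nmethods are. A threshold of 0 is a
// placeholder; the real threshold depends on ReservedCodeCacheSize.
void possibly_flush(FakeNMethod& nm) {
  if (nm.locked_by_vm || nm.is_native) return;   // previously: ... || nm.is_osr
  if (--nm.hotness <= 0) {
    nm.not_entrant = true;                       // cold: schedule for removal
  }
}

int main() {
  FakeNMethod osr = {/*is_osr*/ true, /*is_native*/ false, /*locked_by_vm*/ false,
                     /*not_entrant*/ false, /*hotness*/ 1};
  possibly_flush(osr);
  std::printf("cold osr nmethod made not-entrant: %s\n", osr.not_entrant ? "yes" : "no");
  return 0;
}
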
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Mar 18 09:32:29 2016 +0100
@@ -1387,9 +1387,17 @@
 
 void nmethod::invalidate_osr_method() {
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
+#ifndef ASSERT
+  // Make sure osr nmethod is invalidated only once
+  if (!is_in_use()) {
+    return;
+  }
+#endif
   // Remove from list of active nmethods
-  if (method() != NULL)
-    method()->method_holder()->remove_osr_nmethod(this);
+  if (method() != NULL) {
+    bool removed = method()->method_holder()->remove_osr_nmethod(this);
+    assert(!removed || is_in_use(), "unused osr nmethod should be invalidated");
+  }
 }
 
 void nmethod::log_state_change() const {
@@ -1510,7 +1518,7 @@
   // happens or they get unloaded.
   if (state == zombie) {
     {
-      // Flushing dependecies must be done before any possible
+      // Flushing dependencies must be done before any possible
       // safepoint can sneak in, otherwise the oops used by the
       // dependency logic could have become stale.
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -1526,7 +1534,7 @@
 
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
     // event and it hasn't already been reported for this nmethod then
-    // report it now. The event may have been reported earilier if the GC
+    // report it now. The event may have been reported earlier if the GC
     // marked it for unloading). JvmtiDeferredEventQueue support means
     // we no longer go to a safepoint here.
     post_compiled_method_unload();
@@ -1554,8 +1562,10 @@
 
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
-  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+  assert(!is_osr_method() || is_unloaded() || is_zombie(),
+         "osr nmethod must be unloaded or zombie before flushing");
+  assert(is_zombie() || is_osr_method(), "must be a zombie method");
+  assert(is_marked_for_reclamation() || is_osr_method(), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
   assert_locked_or_safepoint(CodeCache_lock);
@@ -1563,9 +1573,9 @@
   // completely deallocate this method
   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
   if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
+    tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
                   "/Free CodeCache:" SIZE_FORMAT "Kb",
-                  _compile_id, p2i(this), CodeCache::blob_count(),
+                  is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
   }
 
@@ -2917,10 +2927,7 @@
     tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
     tty->print(" for method " INTPTR_FORMAT , p2i(method()));
     tty->print(" { ");
-    if (is_in_use())      tty->print("in_use ");
-    if (is_not_entrant()) tty->print("not_entrant ");
-    if (is_zombie())      tty->print("zombie ");
-    if (is_unloaded())    tty->print("unloaded ");
+    tty->print_cr("%s ", state());
     if (on_scavenge_root_list())  tty->print("scavenge_root ");
     tty->print_cr("}:");
   }
--- a/hotspot/src/share/vm/code/nmethod.hpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Mar 18 09:32:29 2016 +0100
@@ -207,7 +207,7 @@
   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 
   // Protected by Patching_lock
-  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
+  volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}
 
   volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
 
@@ -438,7 +438,20 @@
   bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
   bool  is_not_entrant() const                    { return _state == not_entrant; }
   bool  is_zombie() const                         { return _state == zombie; }
-  bool  is_unloaded() const                       { return _state == unloaded;   }
+  bool  is_unloaded() const                       { return _state == unloaded; }
+
+  // returns a string version of the nmethod state
+  const char* state() const {
+    switch(_state) {
+      case in_use:      return "in use";
+      case not_entrant: return "not_entrant";
+      case zombie:      return "zombie";
+      case unloaded:    return "unloaded";
+      default:
+        fatal("unexpected nmethod state: %d", _state);
+        return NULL;
+    }
+  }
 
 #if INCLUDE_RTM_OPT
   // rtm state accessing and manipulating
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Mar 18 09:32:29 2016 +0100
@@ -2523,8 +2523,8 @@
   }
 }
 
-
-void InstanceKlass::remove_osr_nmethod(nmethod* n) {
+// Remove osr nmethod from the list. Return true if found and removed.
+bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
   assert(n->is_osr_method(), "wrong kind of nmethod");
@@ -2533,6 +2533,7 @@
   int max_level = CompLevel_none;  // Find the max comp level excluding n
   Method* m = n->method();
   // Search for match
+  bool found = false;
   while(cur != NULL && cur != n) {
     if (TieredCompilation && m == cur->method()) {
       // Find max level before n
@@ -2543,6 +2544,7 @@
   }
   nmethod* next = NULL;
   if (cur == n) {
+    found = true;
     next = cur->osr_link();
     if (last == NULL) {
       // Remove first element
@@ -2563,6 +2565,7 @@
     }
     m->set_highest_osr_comp_level(max_level);
   }
+  return found;
 }
 
 int InstanceKlass::mark_osr_nmethods(const Method* m) {
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Mar 18 09:32:29 2016 +0100
@@ -829,7 +829,7 @@
   nmethod* osr_nmethods_head() const         { return _osr_nmethods_head; };
   void set_osr_nmethods_head(nmethod* h)     { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
-  void remove_osr_nmethod(nmethod* n);
+  bool remove_osr_nmethod(nmethod* n);
   int mark_osr_nmethods(const Method* m);
   nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Mar 18 09:32:29 2016 +0100
@@ -355,8 +355,8 @@
   bool forced = _force_sweep;
 
   // Force stack scanning if there is only 10% free space in the code cache.
-  // We force stack scanning only non-profiled code heap gets full, since critical
-  // allocation go to the non-profiled heap and we must be make sure that there is
+  // We force stack scanning only if the non-profiled code heap gets full, since critical
+  // allocations go to the non-profiled heap and we must make sure that there is
   // enough space.
   double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
   if (free_percent <= StartAggressiveSweepingAt) {
@@ -423,12 +423,19 @@
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        // Save information before potentially flushing the nmethod
         int size = nm->total_size();
         bool is_c2_method = nm->is_compiled_by_c2();
+        bool is_osr = nm->is_osr_method();
+        int compile_id = nm->compile_id();
+        intptr_t address = p2i(nm);
+        const char* state_before = nm->state();
+        const char* state_after = "";
 
         MethodStateChange type = process_nmethod(nm);
         switch (type) {
           case Flushed:
+            state_after = "flushed";
             freed_memory += size;
             ++flushed_count;
             if (is_c2_method) {
@@ -436,9 +443,11 @@
             }
             break;
           case MarkedForReclamation:
+            state_after = "marked for reclamation";
             ++marked_for_reclamation_count;
             break;
           case MadeZombie:
+            state_after = "made zombie";
             ++zombified_count;
             break;
           case None:
@@ -446,7 +455,11 @@
           default:
            ShouldNotReachHere();
         }
+        if (PrintMethodFlushing && Verbose && type != None) {
+          tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
+        }
       }
+
       _seen++;
       handle_safepoint_request();
     }
@@ -533,7 +546,7 @@
   NMethodMarker(nmethod* nm) {
     JavaThread* current = JavaThread::current();
     assert (current->is_Code_cache_sweeper_thread(), "Must be");
-    _thread = (CodeCacheSweeperThread*)JavaThread::current();
+    _thread = (CodeCacheSweeperThread*)current;
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
       _thread->set_scanned_nmethod(nm);
@@ -545,6 +558,10 @@
 };
 
 void NMethodSweeper::release_nmethod(nmethod* nm) {
+  // Make sure the released nmethod is no longer referenced by the sweeper thread
+  CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current();
+  thread->set_scanned_nmethod(NULL);
+
   // Clean up any CompiledICHolders
   {
     ResourceMark rm;
@@ -575,7 +592,7 @@
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
-      // Clean inline caches that point to zombie/non-entrant methods
+      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
@@ -589,42 +606,44 @@
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), p2i(nm));
-      }
       release_nmethod(nm);
       assert(result == None, "sanity");
       result = Flushed;
     } else {
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), p2i(nm));
-      }
       nm->mark_for_reclamation();
       // Keep track of code cache state change
       _bytes_changed += nm->total_size();
       SWEEP(nm);
       assert(result == None, "sanity");
       result = MarkedForReclamation;
+      assert(nm->is_marked_for_reclamation(), "nmethod must be marked for reclamation");
     }
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_convert_to_zombie()) {
-      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+      // Clear ICStubs to prevent back patching stubs of zombie or flushed
       // nmethods during the next safepoint (see ICStub::finalize).
       {
         MutexLocker cl(CompiledIC_lock);
         nm->clear_ic_stubs();
       }
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), p2i(nm));
-      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       SWEEP(nm);
-      assert(result == None, "sanity");
-      result = MadeZombie;
-      assert(nm->is_zombie(), "nmethod must be zombie");
+      if (nm->is_osr_method()) {
+        // No inline caches will ever point to osr methods, so we can just remove it.
+        // Make sure that we unregistered the nmethod with the heap and flushed all
+        // dependencies before removing the nmethod (done in make_zombie()).
+        assert(nm->is_zombie(), "nmethod must be unregistered");
+        release_nmethod(nm);
+        assert(result == None, "sanity");
+        result = Flushed;
+      } else {
+        assert(result == None, "sanity");
+        result = MadeZombie;
+        assert(nm->is_zombie(), "nmethod must be zombie");
+      }
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
@@ -632,9 +651,13 @@
       SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
-    // Unloaded code, just make it a zombie
-    if (PrintMethodFlushing && Verbose) {
-      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), p2i(nm));
+    // Code is unloaded, so there are no activations on the stack.
+    // Convert the nmethod to zombie or flush it directly in the OSR case.
+    {
+      // Clean ICs of unloaded nmethods as well because they may reference other
+      // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+      MutexLocker cl(CompiledIC_lock);
+      nm->cleanup_inline_caches();
     }
     if (nm->is_osr_method()) {
       SWEEP(nm);
@@ -643,12 +666,6 @@
       assert(result == None, "sanity");
       result = Flushed;
     } else {
-      {
-        // Clean ICs of unloaded nmethods as well because they may reference other
-        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
-        MutexLocker cl(CompiledIC_lock);
-        nm->cleanup_inline_caches();
-      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       SWEEP(nm);
@@ -657,7 +674,7 @@
     }
   } else {
     possibly_flush(nm);
-    // Clean-up all inline caches that point to zombie/non-reentrant methods
+    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
     SWEEP(nm);
@@ -668,10 +685,10 @@
 
 void NMethodSweeper::possibly_flush(nmethod* nm) {
   if (UseCodeCacheFlushing) {
-    if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+    if (!nm->is_locked_by_vm() && !nm->is_native_method()) {
       bool make_not_entrant = false;
 
-      // Do not make native methods and OSR-methods not-entrant
+      // Do not make native methods not-entrant
       nm->dec_hotness_counter();
       // Get the initial value of the hotness counter. This value depends on the
       // ReservedCodeCacheSize
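
The new call to set_scanned_nmethod(NULL) in release_nmethod() makes sure the sweeper thread drops its reference to an nmethod before the memory is returned to the CodeCache; otherwise the NMethodMarker set up earlier in the sweep could keep pointing at freed storage. A minimal RAII sketch of that pattern, using hypothetical types rather than the HotSpot classes:

#include <cstdio>

struct FakeNMethod { int compile_id; };

// Hypothetical stand-in for CodeCacheSweeperThread.
struct FakeSweeperThread {
  FakeNMethod* scanned = nullptr;                        // nmethod currently exposed
  void set_scanned_nmethod(FakeNMethod* nm) { scanned = nm; }
};

// In the spirit of NMethodMarker: expose the nmethod while it is being processed.
struct Marker {
  FakeSweeperThread& thread;
  Marker(FakeSweeperThread& t, FakeNMethod* nm) : thread(t) { thread.set_scanned_nmethod(nm); }
  ~Marker() { thread.set_scanned_nmethod(nullptr); }
};

// In the spirit of release_nmethod(): clear the reference *before* freeing, so the
// sweeper thread never holds a pointer to reclaimed memory.
void release(FakeSweeperThread& thread, FakeNMethod* nm) {
  thread.set_scanned_nmethod(nullptr);
  delete nm;
}

int main() {
  FakeSweeperThread sweeper;
  FakeNMethod* nm = new FakeNMethod{42};
  Marker marker(sweeper, nm);     // nm is visible to the VM while being processed
  release(sweeper, nm);           // reference dropped before the memory goes away
  std::printf("scanned after release: %p\n", static_cast<void*>(sweeper.scanned));
  return 0;
}
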
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Thu Mar 17 12:04:04 2016 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Fri Mar 18 09:32:29 2016 +0100
@@ -45,12 +45,12 @@
 //     and sweep_code_cache() cannot execute at the same time.
 //     To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
 //     be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
-//     invalidation, and (iv) being replaced be a different method version (tiered
-//     compilation). Not-entrant nmethod cannot be called by Java threads, but they
-//     can still be active on the stack. To ensure that active nmethod are not reclaimed,
+//     invalidation, and (iv) being replaced by a different method version (tiered
+//     compilation). Not-entrant nmethods cannot be called by Java threads, but they
+//     can still be active on the stack. To ensure that active nmethods are not reclaimed,
 //     we have to wait until the next marking phase has completed. If a not-entrant
 //     nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
-//     remove the nmethod, all inline caches (IC) that point to the the nmethod must be
+//     remove the nmethod, all inline caches (IC) that point to the nmethod must be
 //     cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
 //     state change happens during separate sweeps. It may take at least 3 sweeps before an
 //     nmethod's space is freed.
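
As a rough illustration of the life cycle described above, and of why OSR nmethods can now be freed faster: a regular nmethod is made not-entrant, converted to zombie, marked for reclamation, and only then flushed (at least three sweeps), while a zombie OSR nmethod is released immediately because no inline caches can point to it. The sketch below is a simplified model with hypothetical names; in HotSpot, marked-for-reclamation is a flag, not an _state value:

#include <cstdio>

enum class State { not_entrant, zombie, marked_for_reclamation, flushed };

// One call models one pass of the sweeper over a not-entrant nmethod with no
// activations on the stack (simplified; the real sweeper also waits for stack scans).
State sweep_once(State s, bool is_osr) {
  switch (s) {
    case State::not_entrant:
      // make_zombie(); an OSR nmethod is released in the same sweep, since no
      // inline caches point to OSR entry points.
      return is_osr ? State::flushed : State::zombie;
    case State::zombie:
      return State::marked_for_reclamation;
    case State::marked_for_reclamation:
      return State::flushed;
    default:
      return s;   // already flushed
  }
}

int main() {
  const bool cases[] = {false, true};
  for (bool is_osr : cases) {
    State s = State::not_entrant;
    int sweeps = 0;
    while (s != State::flushed) {
      s = sweep_once(s, is_osr);
      ++sweeps;
    }
    std::printf("%s nmethod freed after %d sweep(s)\n", is_osr ? "osr" : "regular", sweeps);
  }
  return 0;
}
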