6950075: nmethod sweeper should operate concurrently
author:    never
date:      Mon, 17 May 2010 16:50:07 -0700
changeset: 5533:e8d9ff82ec62
parent:    5532:34c4ef11dbed
child:     5534:9bea9ee28dfc
Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/code/codeCache.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/safepoint.cpp
hotspot/src/share/vm/runtime/sweeper.cpp
hotspot/src/share/vm/runtime/sweeper.hpp
--- a/hotspot/src/share/vm/code/codeCache.cpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Mon May 17 16:50:07 2010 -0700
@@ -124,6 +124,23 @@
   return (nmethod*)cb;
 }
 
+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
 
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not
@@ -414,7 +431,7 @@
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -432,7 +449,8 @@
 }
 
 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For the concurrent sweeper this will be called with the CodeCache_lock taken by the caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -463,7 +481,7 @@
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
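
The two helpers added above exist because the sweeper now parks a cursor in the code cache between chunks of concurrent work; blobs other than nmethods can be freed asynchronously during that window, so the cursor must only ever rest on an nmethod. A minimal standalone sketch of the same skip-ahead iteration (plain C++, not HotSpot code; the Blob list and is_nmethod flag are stand-ins for CodeBlob and cb->is_nmethod(), and no locking is shown):

```cpp
#include <cstddef>

struct Blob {
  Blob* next_blob;
  bool  is_nmethod;  // stand-in for cb->is_nmethod()
};

// Mirrors CodeCache::first_nmethod()/next_nmethod(): advance, then skip
// every blob that is not an nmethod, so a parked cursor never points at
// a blob kind another thread may free underneath us.
static Blob* skip_to_nmethod(Blob* cb) {
  while (cb != NULL && !cb->is_nmethod) {
    cb = cb->next_blob;
  }
  return cb;
}

static Blob* next_nmethod(Blob* cb) {
  return skip_to_nmethod(cb->next_blob);
}
```
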
--- a/hotspot/src/share/vm/code/codeCache.hpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Mon May 17 16:50:07 2010 -0700
@@ -102,6 +102,8 @@
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod* alive_nmethod(CodeBlob *cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod (CodeBlob* cb);
   static int       nof_blobs()                 { return _number_of_blobs; }
 
   // GC support
--- a/hotspot/src/share/vm/code/nmethod.cpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Mon May 17 16:50:07 2010 -0700
@@ -1014,9 +1014,7 @@
 
 void nmethod::cleanup_inline_caches() {
 
-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
@@ -1071,7 +1069,6 @@
 // Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
 
   // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
@@ -1127,7 +1124,7 @@
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this
@@ -1220,17 +1217,6 @@
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
     }
 
-    // When the nmethod becomes zombie it is no longer alive so the
-    // dependencies must be flushed.  nmethods in the not_entrant
-    // state will be flushed later when the transition to zombie
-    // happens or they get unloaded.
-    if (state == zombie) {
-      assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-      flush_dependencies(NULL);
-    } else {
-      assert(state == not_entrant, "other cases may need to be handled differently");
-    }
-
     was_alive = is_in_use(); // Read state under lock
 
     // Change state
@@ -1241,6 +1227,17 @@
 
   } // leave critical region under Patching_lock
 
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {
@@ -1310,21 +1307,13 @@
   return true;
 }
 
-
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);
 
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@@ -1373,7 +1362,7 @@
 // notifies instanceKlasses that are reachable
 
 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
@@ -2266,7 +2255,6 @@
     tty->print(" for method " INTPTR_FORMAT , (address)method());
     tty->print(" { ");
     if (version())        tty->print("v%d ", version());
-    if (level())          tty->print("l%d ", level());
     if (is_in_use())      tty->print("in_use ");
     if (is_not_entrant()) tty->print("not_entrant ");
     if (is_zombie())      tty->print("zombie ");
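
The most delicate nmethod.cpp change above moves the zombie-transition call to flush_dependencies() out of the Patching_lock critical region: the state flip stays under Patching_lock, and the dependency flush runs afterwards under CodeCache_lock, so the two locks are never nested. A hedged sketch of that shape in standard C++ (std::mutex standing in for the VM mutexes; names and bodies are illustrative stubs, not HotSpot code):

```cpp
#include <mutex>

static std::mutex patching_lock;    // stand-in for Patching_lock
static std::mutex code_cache_lock;  // stand-in for CodeCache_lock

enum State { in_use, not_entrant, zombie };
static State state_ = in_use;

static void flush_dependencies() {
  // stub: walk and clear this nmethod's dependency records
}

static void make_not_entrant_or_zombie(State target) {
  {
    // Keep the Patching_lock region minimal: just the state change
    // (plus, in the VM, the code patching that goes with it).
    std::lock_guard<std::mutex> guard(patching_lock);
    state_ = target;
  }  // leave critical region under patching_lock

  // Heavier bookkeeping moves outside, under its own lock, so no
  // thread ever holds both locks at once.
  if (target == zombie) {
    std::lock_guard<std::mutex> guard(code_cache_lock);
    flush_dependencies();
  }
}
```
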
--- a/hotspot/src/share/vm/code/nmethod.hpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Mon May 17 16:50:07 2010 -0700
@@ -82,7 +82,6 @@
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;                    // version number (0 = first version)
-  unsigned int level:4;                      // optimization level
   unsigned int age:4;                        // age (in # of sweep steps)
 
   unsigned int state:2;                      // {alive, zombie, unloaded}
@@ -410,14 +409,13 @@
   void flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()            {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }
 
   bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
-  void  mark_for_reclamation()                    { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()                  { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()                    { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()                  { flags.markedForReclamation = 0; }
 
   bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }
@@ -428,9 +426,6 @@
   bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z)     { flags.speculatively_disconnected = z; }
 
-  int   level() const                             { return flags.level; }
-  void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                        { return _comp_level; }
 
   int   version() const                           { return flags.version; }
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon May 17 16:50:07 2010 -0700
@@ -461,12 +461,25 @@
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
 
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
 
   CompileTask* task = _first;
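
The CompileQueue::get() change above is the hook that drives concurrent sweeping: an idle compiler thread switches from an untimed wait to a timed wait during emergency flushing, and spends each timeout calling possibly_sweep() with the queue lock dropped. A sketch of that idiom with standard C++ primitives (WorkQueue, emergency, and do_background_work are placeholder names, not HotSpot APIs):

```cpp
#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

template <typename Task>
class WorkQueue {
  std::mutex              mu_;
  std::condition_variable cv_;
  std::deque<Task>        tasks_;
 public:
  // 'emergency' plays the role of the needs_flushing() check and
  // 'do_background_work' that of NMethodSweeper::possibly_sweep().
  template <typename Pred, typename Fn>
  Task get(Pred emergency, Fn do_background_work, std::chrono::seconds interval) {
    std::unique_lock<std::mutex> lk(mu_);
    while (tasks_.empty()) {
      if (emergency()) {
        // During emergency periods, wake up occasionally so an
        // otherwise-idle worker can help sweep.
        if (cv_.wait_for(lk, interval) == std::cv_status::timeout) {
          lk.unlock();            // never sweep while holding the queue lock
          do_background_work();
          lk.lock();
        }
      } else {
        cv_.wait(lk);             // normal operation: no timer needed
      }
    }
    Task t = tasks_.front();
    tasks_.pop_front();
    return t;
  }

  void put(Task t) {
    std::lock_guard<std::mutex> lk(mu_);
    tasks_.push_back(t);
    cv_.notify_one();
  }
};
```
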
--- a/hotspot/src/share/vm/runtime/globals.hpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon May 17 16:50:07 2010 -0700
@@ -2756,6 +2756,9 @@
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")         \
                                                                             \
+  product(intx, NmethodSweepCheckInterval, 5,                               \
+          "Compilers wake up every n seconds to possibly sweep nmethods")   \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
                                                                             \
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Mon May 17 16:50:07 2010 -0700
@@ -472,7 +472,7 @@
   }
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
 
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Mon May 17 16:50:07 2010 -0700
@@ -33,6 +33,8 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;
@@ -50,14 +52,20 @@
 };
 static MarkActivationClosure mark_activation_closure;
 
-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
   // safepoint.  If we aren't in the middle of a scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+  // code cache flushing is in progress, don't skip sweeping; sweeping helps
+  // make progress clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }
 
   // Make sure CompiledIC_lock is unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
@@ -68,7 +76,7 @@
   if (_current == NULL) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first();
+    _current     = CodeCache::first_nmethod();
     _traversals  += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -81,48 +89,9 @@
     _not_entrant_seen_on_stack = 0;
   }
 
-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, no race with setters
       _advise_to_sweep = 0;
     }
 
@@ -155,13 +124,99 @@
   }
 }
 
+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if (PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compiler threads may check in to sweep more often than stack scans happen;
+  // don't keep trying once everything has been scanned.
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack.  We don't have to aggressively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if (PrintMethodFlushing) {
+    jlong sweep_end             = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
+
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that point to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;
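
possibly_sweep() above is the concurrency gate: any number of compiler threads may poll it, but the Atomic::cmpxchg on _sweep_started admits exactly one of them into sweep_code_cache() while the rest return immediately instead of blocking. The same gate in standalone C++ (a sketch only, with std::atomic in place of HotSpot's Atomic class):

```cpp
#include <atomic>

static std::atomic<int> sweep_started(0);

void possibly_sweep() {
  int expected = 0;
  // Equivalent of Atomic::cmpxchg(1, &_sweep_started, 0): only the
  // thread that flips 0 -> 1 proceeds; all others return at once.
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;
  }
  // ... sweep one slice of the code cache here ...
  sweep_started.store(0);  // reopen the gate for the next poll
}
```
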
@@ -178,6 +233,7 @@
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we couldn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();
@@ -227,6 +285,7 @@
     }
 
     // Clean-up all inline caches that point to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }
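
With sweeping off the safepoint path, process_nmethod() can no longer assume the world is stopped, so the hunks above take the narrowest lock that covers each hazard: CompiledIC_lock around inline-cache cleanup, and CodeCache_lock (without safepoint checks) around flush(). A sketch of that scoped-locking shape (std::lock_guard standing in for MutexLocker/MutexLockerEx; all names are illustrative):

```cpp
#include <mutex>

static std::mutex compiled_ic_lock;  // stand-in for CompiledIC_lock
static std::mutex code_cache_lock;   // stand-in for CodeCache_lock

struct NMethod {
  bool marked_for_reclamation;
  bool alive;
};

static void cleanup_inline_caches(NMethod&) { /* edit inline caches */ }
static void flush(NMethod&)                 { /* free the storage   */ }

static void process(NMethod& nm) {
  if (nm.marked_for_reclamation) {
    // Freeing storage mutates code cache structures: hold its lock
    // only for the duration of the flush.
    std::lock_guard<std::mutex> guard(code_cache_lock);
    flush(nm);
  } else if (nm.alive) {
    // Inline-cache edits race with patching threads: hold the IC
    // lock around just the cleanup.
    std::lock_guard<std::mutex> guard(compiled_ic_lock);
    cleanup_inline_caches(nm);
  }
}
```
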
@@ -235,8 +294,8 @@
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // reclaimed by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.
@@ -364,8 +423,8 @@
     xtty->end_elem();
   }
 
-  // Shut off compiler. Sweeper will run exiting from this safepoint
-  // and turn it back on if it clears enough space
+  // Shut off compiler. Sweeper will start over with a new stack scan and
+  // traversal cycle and turn it back on if it clears enough space.
   if (was_full()) {
     _last_was_full = os::javaTimeMillis();
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
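
Taken together, sweeper.cpp now implements a two-phase protocol: scan_stacks() still runs inside every safepoint and decides whether there is sweeping to do, while possibly_sweep()/sweep_code_cache() consume that work on compiler threads between safepoints. A condensed, hedged sketch of the handshake (names follow the patch but the bodies are stubs; the safepoint phase is assumed single-threaded, as in the VM):

```cpp
#include <atomic>

static bool             do_sweep = false;   // set at a safepoint, read concurrently
static std::atomic<int> sweep_started(0);   // gate: one sweeper at a time
static int              invocations = 0;    // slices of the code cache left

// Phase 1: runs inside a safepoint (all Java threads stopped).
void scan_stacks() {
  // ... mark nmethod activations found on thread stacks ...
  invocations = 4;   // NmethodSweepFraction slices per traversal
  do_sweep = true;   // allow concurrent sweeping until the next scan
}

// Phase 2: polled by compiler threads between safepoints.
void possibly_sweep() {
  if (!do_sweep || invocations <= 0) return;
  int expected = 0;
  if (!sweep_started.compare_exchange_strong(expected, 1)) return;
  --invocations;
  // ... sweep one slice under CodeCache_lock, as in sweep_code_cache() ...
  sweep_started.store(0);
}
```
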
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Mon May 17 11:32:56 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Mon May 17 16:50:07 2010 -0700
@@ -35,6 +35,8 @@
 
   static bool      _rescan;          // Indicates that we should do a full rescan
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the concurrent sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to control the concurrent sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethods that are still on stack
 
@@ -48,7 +50,9 @@
  public:
   static long traversal_count() { return _traversals; }
 
-  static void sweep();  // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No