src/hotspot/share/runtime/safepoint.cpp
changeset 51376 181e6a03249b
parent 51338 aa3bfacc912c
child 51598 c88019b32bc4
--- a/src/hotspot/share/runtime/safepoint.cpp	Fri Aug 10 09:30:26 2018 -0400
+++ b/src/hotspot/share/runtime/safepoint.cpp	Fri Aug 10 09:36:01 2018 -0400
@@ -354,26 +354,24 @@
           // See the comments in synchronizer.cpp for additional remarks on spinning.
           //
           // In the future we might:
-          // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
+          // -- Modify the safepoint scheme to avoid potentially unbounded spinning.
           //    This is tricky as the path used by a thread exiting the JVM (say on
           //    on JNI call-out) simply stores into its state field.  The burden
           //    is placed on the VM thread, which must poll (spin).
-          // 2. Find something useful to do while spinning.  If the safepoint is GC-related
+          // -- Find something useful to do while spinning.  If the safepoint is GC-related
           //    we might aggressively scan the stacks of threads that are already safe.
-          // 3. Use Solaris schedctl to examine the state of the still-running mutators.
-          //    If all the mutators are ONPROC there's no reason to sleep or yield.
-          // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
-          // 5. Check system saturation.  If the system is not fully saturated then
+          // -- YieldTo() any still-running mutators that are ready but OFFPROC.
+          // -- Check system saturation.  If the system is not fully saturated then
           //    simply spin and avoid sleep/yield.
-          // 6. As still-running mutators rendezvous they could unpark the sleeping
+          // -- As still-running mutators rendezvous they could unpark the sleeping
           //    VMthread.  This works well for still-running mutators that become
           //    safe.  The VMthread must still poll for mutators that call-out.
-          // 7. Drive the policy on time-since-begin instead of iterations.
-          // 8. Consider making the spin duration a function of the # of CPUs:
+          // -- Drive the policy on time-since-begin instead of iterations.
+          // -- Consider making the spin duration a function of the # of CPUs:
           //    Spin = (((ncpus-1) * M) + K) + F(still_running)
           //    Alternately, instead of counting iterations of the outer loop
           //    we could count the # of threads visited in the inner loop, above.
-          // 9. On windows consider using the return value from SwitchThreadTo()
+          // -- On Windows consider using the return value from SwitchThreadTo()
           //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
 
           if (int(iterations) == -1) { // overflow - something is wrong.
@@ -561,20 +559,6 @@
         // Start suspended threads
         jtiwh.rewind();
         for (; JavaThread *current = jtiwh.next(); ) {
-          // A problem occurring on Solaris is when attempting to restart threads
-          // the first #cpus - 1 go well, but then the VMThread is preempted when we get
-          // to the next one (since it has been running the longest).  We then have
-          // to wait for a cpu to become available before we can continue restarting
-          // threads.
-          // FIXME: This causes the performance of the VM to degrade when active and with
-          // large numbers of threads.  Apparently this is due to the synchronous nature
-          // of suspending threads.
-          //
-          // TODO-FIXME: the comments above are vestigial and no longer apply.
-          // Furthermore, using solaris' schedctl in this particular context confers no benefit
-          if (VMThreadHintNoPreempt) {
-            os::hint_no_preempt();
-          }
           ThreadSafepointState* cur_state = current->safepoint_state();
           assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
           cur_state->restart();