Merge

author     sspitsyn
date       Wed, 18 Oct 2017 22:10:28 +0000
changeset  47653:e8a76ff6e1a4
parent     47650:80e75cbb08d6
parent     47652:6de1ff734cf1
child      47656:e0b35048532e
--- a/make/hotspot/lib/JvmFeatures.gmk	Wed Oct 18 21:05:09 2017 +0000
+++ b/make/hotspot/lib/JvmFeatures.gmk	Wed Oct 18 22:10:28 2017 +0000
@@ -132,6 +132,7 @@
       cms/ g1/ parallel/
   JVM_EXCLUDE_FILES += \
       concurrentGCThread.cpp \
+      suspendibleThreadSet.cpp \
       plab.cpp
   JVM_EXCLUDE_FILES += \
       g1MemoryPool.cpp \
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -55,6 +55,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -5224,6 +5225,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure);
+  }
+
   // This is the point where the entire marking should have completed.
   verify_work_stacks_empty();
 
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -46,6 +46,7 @@
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -999,6 +1000,8 @@
   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &evacuate_followers);
+
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
--- a/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -30,12 +30,12 @@
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/g1Policy.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/concurrentGCPhaseManager.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -26,7 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -57,7 +57,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
@@ -68,8 +67,10 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -4128,17 +4129,6 @@
   }
 };
 
-void G1CollectedHeap::process_weak_jni_handles() {
-  double ref_proc_start = os::elapsedTime();
-
-  G1STWIsAliveClosure is_alive(this);
-  G1KeepAliveClosure keep_alive(this);
-  JNIHandles::weak_oops_do(&is_alive, &keep_alive);
-
-  double ref_proc_time = os::elapsedTime() - ref_proc_start;
-  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
-}
-
 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
   // Any reference objects, in the collection set, that were 'discovered'
   // by the CM ref processor should have already been copied (either by
@@ -4369,14 +4359,23 @@
     process_discovered_references(per_thread_states);
   } else {
     ref_processor_stw()->verify_no_references_recorded();
-    process_weak_jni_handles();
+  }
+
+  G1STWIsAliveClosure is_alive(this);
+  G1KeepAliveClosure keep_alive(this);
+
+  {
+    double start = os::elapsedTime();
+
+    WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
+
+    double time_ms = (os::elapsedTime() - start) * 1000.0;
+    g1_policy()->phase_times()->record_ref_proc_time(time_ms);
   }
 
   if (G1StringDedup::is_enabled()) {
     double fixup_start = os::elapsedTime();
 
-    G1STWIsAliveClosure is_alive(this);
-    G1KeepAliveClosure keep_alive(this);
     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
 
     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Oct 18 22:10:28 2017 +0000
@@ -303,8 +303,6 @@
 
   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
-  void process_weak_jni_handles();
-
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -38,7 +38,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -46,8 +45,10 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
@@ -1603,6 +1604,23 @@
   // Is alive closure.
   G1CMIsAliveClosure g1_is_alive(g1h);
 
+  // Instances of the 'Keep Alive' and 'Complete GC' closures used
+  // in serial reference processing. Note these closures are also
+  // used for serially processing (by the the current thread) the
+  // JNI references during parallel reference processing.
+  //
+  // These closures do not need to synchronize with the worker
+  // threads involved in parallel reference processing as these
+  // instances are executed serially by the current thread (e.g.
+  // reference processing is not multi-threaded and is thus
+  // performed by the current thread instead of a gang worker).
+  //
+  // The gang tasks involved in parallel reference processing create
+  // their own instances of these closures, which do their own
+  // synchronization among themselves.
+  G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
+  G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
+
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
@@ -1617,23 +1635,6 @@
     rp->setup_policy(clear_all_soft_refs);
     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
 
-    // Instances of the 'Keep Alive' and 'Complete GC' closures used
-    // in serial reference processing. Note these closures are also
-    // used for serially processing (by the the current thread) the
-    // JNI references during parallel reference processing.
-    //
-    // These closures do not need to synchronize with the worker
-    // threads involved in parallel reference processing as these
-    // instances are executed serially by the current thread (e.g.
-    // reference processing is not multi-threaded and is thus
-    // performed by the current thread instead of a gang worker).
-    //
-    // The gang tasks involved in parallel reference processing create
-    // their own instances of these closures, which do their own
-    // synchronization among themselves.
-    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
-    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
-
     // We need at least one active thread. If reference processing
     // is not multi-threaded we use the current (VMThread) thread,
     // otherwise we use the work gang from the G1CollectedHeap and
@@ -1687,6 +1688,11 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&g1_is_alive, &g1_keep_alive, &g1_drain_mark_stack);
+  }
+
   if (has_overflown()) {
     // We can not trust g1_is_alive if the marking stack overflowed
     return;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Wed Oct 18 22:10:28 2017 +0000
@@ -29,7 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
--- a/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -43,6 +43,7 @@
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -181,6 +182,13 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) trace("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&GenMarkSweep::is_alive,
+                                &GenMarkSweep::keep_alive,
+                                &GenMarkSweep::follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
 
@@ -272,7 +280,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
+  WeakProcessor::oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -36,8 +36,8 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "runtime/mutexLocker.hpp"
 
 G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/semaphore.hpp"
-#include "runtime/thread.inline.hpp"
-
-uint   SuspendibleThreadSet::_nthreads          = 0;
-uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
-bool   SuspendibleThreadSet::_suspend_all       = false;
-double SuspendibleThreadSet::_suspend_all_start = 0.0;
-
-static Semaphore* _synchronize_wakeup = NULL;
-
-void SuspendibleThreadSet_init() {
-  assert(_synchronize_wakeup == NULL, "STS already initialized");
-  _synchronize_wakeup = new Semaphore();
-}
-
-bool SuspendibleThreadSet::is_synchronized() {
-  assert_lock_strong(STS_lock);
-  assert(_nthreads_stopped <= _nthreads, "invariant");
-  return _nthreads_stopped == _nthreads;
-}
-
-void SuspendibleThreadSet::join() {
-  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  while (_suspend_all) {
-    ml.wait(Mutex::_no_safepoint_check_flag);
-  }
-  _nthreads++;
-  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_nthreads > 0, "Invalid");
-  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
-  _nthreads--;
-  if (_suspend_all && is_synchronized()) {
-    // This leave completes a request, so inform the requestor.
-    _synchronize_wakeup->signal();
-  }
-}
-
-void SuspendibleThreadSet::yield() {
-  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  if (_suspend_all) {
-    _nthreads_stopped++;
-    if (is_synchronized()) {
-      if (ConcGCYieldTimeout > 0) {
-        double now = os::elapsedTime();
-        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
-      }
-      // This yield completes the request, so inform the requestor.
-      _synchronize_wakeup->signal();
-    }
-    while (_suspend_all) {
-      ml.wait(Mutex::_no_safepoint_check_flag);
-    }
-    assert(_nthreads_stopped > 0, "Invalid");
-    _nthreads_stopped--;
-  }
-}
-
-void SuspendibleThreadSet::synchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  {
-    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-    assert(!_suspend_all, "Only one at a time");
-    _suspend_all = true;
-    if (is_synchronized()) {
-      return;
-    }
-  } // Release lock before semaphore wait.
-
-  // Semaphore initial count is zero.  To reach here, there must be at
-  // least one not yielded thread in the set, e.g. is_synchronized()
-  // was false before the lock was released.  A thread in the set will
-  // signal the semaphore iff it is the last to yield or leave while
-  // there is an active suspend request.  So there will be exactly one
-  // signal, which will increment the semaphore count to one, which
-  // will then be consumed by this wait, returning it to zero.  No
-  // thread can exit yield or enter the set until desynchronize is
-  // called, so there are no further opportunities for the semaphore
-  // being signaled until we get back here again for some later
-  // synchronize call.  Hence, there is no need to re-check for
-  // is_synchronized after the wait; it will always be true there.
-  _synchronize_wakeup->wait();
-
-#ifdef ASSERT
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-#endif
-}
-
-void SuspendibleThreadSet::desynchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-  _suspend_all = false;
-  ml.notify_all();
-}
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.hpp	Wed Oct 18 21:05:09 2017 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-#define SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-
-#include "memory/allocation.hpp"
-
-// A SuspendibleThreadSet is a set of threads that can be suspended.
-// A thread can join and later leave the set, and periodically yield.
-// If some thread (not in the set) requests, via synchronize(), that
-// the threads be suspended, then the requesting thread is blocked
-// until all the threads in the set have yielded or left the set. Threads
-// may not enter the set when an attempted suspension is in progress. The
-// suspending thread later calls desynchronize(), allowing the suspended
-// threads to continue.
-class SuspendibleThreadSet : public AllStatic {
-  friend class SuspendibleThreadSetJoiner;
-  friend class SuspendibleThreadSetLeaver;
-
-private:
-  static uint   _nthreads;
-  static uint   _nthreads_stopped;
-  static bool   _suspend_all;
-  static double _suspend_all_start;
-
-  static bool is_synchronized();
-
-  // Add the current thread to the set. May block if a suspension is in progress.
-  static void join();
-
-  // Removes the current thread from the set.
-  static void leave();
-
-public:
-  // Returns true if an suspension is in progress.
-  static bool should_yield() { return _suspend_all; }
-
-  // Suspends the current thread if a suspension is in progress.
-  static void yield();
-
-  // Returns when all threads in the set are suspended.
-  static void synchronize();
-
-  // Resumes all suspended threads in the set.
-  static void desynchronize();
-};
-
-class SuspendibleThreadSetJoiner : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-
-  ~SuspendibleThreadSetJoiner() {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  bool should_yield() {
-    if (_active) {
-      return SuspendibleThreadSet::should_yield();
-    } else {
-      return false;
-    }
-  }
-
-  void yield() {
-    assert(_active, "Thread has not joined the suspendible thread set");
-    SuspendibleThreadSet::yield();
-  }
-};
-
-class SuspendibleThreadSetLeaver : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  ~SuspendibleThreadSetLeaver() {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-};
-
-#endif // SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -47,6 +47,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -542,6 +543,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), mark_and_push_closure(), follow_stack_closure());
+  }
+
   // This is the point where the entire marking should have completed.
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
@@ -617,7 +623,7 @@
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(adjust_pointer_closure());
+  WeakProcessor::oops_do(adjust_pointer_closure());
 
   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -52,6 +52,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.inline.hpp"
@@ -2118,6 +2119,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), &mark_and_push_closure, &follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
 
@@ -2170,8 +2176,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&oop_closure);
+  WeakProcessor::oops_do(&oop_closure);
 
   CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -45,6 +45,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
@@ -406,14 +407,15 @@
 
     scavenge_midpoint.update();
 
+    PSKeepAliveClosure keep_alive(promotion_manager);
+    PSEvacuateFollowersClosure evac_followers(promotion_manager);
+
     // Process reference objects discovered during scavenge
     {
       GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
 
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
-      PSKeepAliveClosure keep_alive(promotion_manager);
-      PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
       ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
       if (reference_processor()->processing_is_mt()) {
@@ -441,6 +443,11 @@
     }
 
     {
+      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+      WeakProcessor::weak_oops_do(&_is_alive_closure, &keep_alive, &evac_followers);
+    }
+
+    {
       GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -41,6 +41,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -658,6 +659,8 @@
   gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &evacuate_followers);
+
   if (!_promotion_failed) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -43,6 +43,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -217,6 +218,11 @@
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -42,6 +42,7 @@
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
@@ -652,7 +653,7 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
-  JNIHandles::weak_oops_do(root_closure);
+  WeakProcessor::oops_do(root_closure);
   _young_gen->ref_processor()->weak_oops_do(root_closure);
   _old_gen->ref_processor()->weak_oops_do(root_closure);
 }
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -36,7 +36,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/jniHandles.hpp"
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
@@ -245,51 +244,16 @@
                                is_alive, keep_alive, complete_gc, task_executor, phase_times);
   }
 
-  // Weak global JNI references. It would make more sense (semantically) to
-  // traverse these simultaneously with the regular weak references above, but
-  // that is not how the JDK1.2 specification is. See #4126360. Native code can
-  // thus use JNI weak references to circumvent the phantom references and
-  // resurrect a "post-mortem" object.
-  {
-    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
-    if (task_executor != NULL) {
-      task_executor->set_single_threaded_mode();
-    }
-    process_phaseJNI(is_alive, keep_alive, complete_gc);
+  if (task_executor != NULL) {
+    // Record the work done by the parallel workers.
+    task_executor->set_single_threaded_mode();
   }
 
   phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
 
-  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
-
   return stats;
 }
 
-#ifndef PRODUCT
-// Calculate the number of jni handles.
-size_t ReferenceProcessor::count_jni_refs() {
-  class CountHandleClosure: public OopClosure {
-  private:
-    size_t _count;
-  public:
-    CountHandleClosure(): _count(0) {}
-    void do_oop(oop* unused)       { _count++; }
-    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-    size_t count() { return _count; }
-  };
-  CountHandleClosure global_handle_count;
-  JNIHandles::weak_oops_do(&global_handle_count);
-  return global_handle_count.count();
-}
-#endif
-
-void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
-                                          OopClosure*        keep_alive,
-                                          VoidClosure*       complete_gc) {
-  JNIHandles::weak_oops_do(is_alive, keep_alive);
-  complete_gc->do_void();
-}
-
 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                                        ReferenceProcessorPhaseTimes* phase_times) {
   // Enqueue references that are not made active again, and
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Wed Oct 18 22:10:28 2017 +0000
@@ -246,10 +246,6 @@
                                   AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);
 
-  void process_phaseJNI(BoolObjectClosure* is_alive,
-                        OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
-
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
   // dead but which must be kept alive by policy (and their closure).
@@ -341,9 +337,6 @@
 
   void clear_discovered_references(DiscoveredList& refs_list);
 
-  // Calculate the number of jni handles.
-  size_t count_jni_refs();
-
   void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;
 
   // Balances reference queues.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+uint   SuspendibleThreadSet::_nthreads          = 0;
+uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
+bool   SuspendibleThreadSet::_suspend_all       = false;
+double SuspendibleThreadSet::_suspend_all_start = 0.0;
+
+static Semaphore* _synchronize_wakeup = NULL;
+
+void SuspendibleThreadSet_init() {
+  assert(_synchronize_wakeup == NULL, "STS already initialized");
+  _synchronize_wakeup = new Semaphore();
+}
+
+bool SuspendibleThreadSet::is_synchronized() {
+  assert_lock_strong(STS_lock);
+  assert(_nthreads_stopped <= _nthreads, "invariant");
+  return _nthreads_stopped == _nthreads;
+}
+
+void SuspendibleThreadSet::join() {
+  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  while (_suspend_all) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+  }
+  _nthreads++;
+  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
+}
+
+void SuspendibleThreadSet::leave() {
+  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_nthreads > 0, "Invalid");
+  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
+  _nthreads--;
+  if (_suspend_all && is_synchronized()) {
+    // This leave completes a request, so inform the requestor.
+    _synchronize_wakeup->signal();
+  }
+}
+
+void SuspendibleThreadSet::yield() {
+  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  if (_suspend_all) {
+    _nthreads_stopped++;
+    if (is_synchronized()) {
+      if (ConcGCYieldTimeout > 0) {
+        double now = os::elapsedTime();
+        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
+      }
+      // This yield completes the request, so inform the requestor.
+      _synchronize_wakeup->signal();
+    }
+    while (_suspend_all) {
+      ml.wait(Mutex::_no_safepoint_check_flag);
+    }
+    assert(_nthreads_stopped > 0, "Invalid");
+    _nthreads_stopped--;
+  }
+}
+
+void SuspendibleThreadSet::synchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  if (ConcGCYieldTimeout > 0) {
+    _suspend_all_start = os::elapsedTime();
+  }
+  {
+    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+    assert(!_suspend_all, "Only one at a time");
+    _suspend_all = true;
+    if (is_synchronized()) {
+      return;
+    }
+  } // Release lock before semaphore wait.
+
+  // Semaphore initial count is zero.  To reach here, there must be at
+  // least one not yielded thread in the set, e.g. is_synchronized()
+  // was false before the lock was released.  A thread in the set will
+  // signal the semaphore iff it is the last to yield or leave while
+  // there is an active suspend request.  So there will be exactly one
+  // signal, which will increment the semaphore count to one, which
+  // will then be consumed by this wait, returning it to zero.  No
+  // thread can exit yield or enter the set until desynchronize is
+  // called, so there are no further opportunities for the semaphore
+  // being signaled until we get back here again for some later
+  // synchronize call.  Hence, there is no need to re-check for
+  // is_synchronized after the wait; it will always be true there.
+  _synchronize_wakeup->wait();
+
+#ifdef ASSERT
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+#endif
+}
+
+void SuspendibleThreadSet::desynchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+  _suspend_all = false;
+  ml.notify_all();
+}
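
The synchronize()/desynchronize() pair above is the requester side of this protocol. A minimal sketch of how the requesting (VM) thread might bracket a pause, with the helper name and surrounding work as hypothetical placeholders (not part of this changeset):

// Illustrative sketch, not part of this changeset: the requesting thread
// blocks until every joined thread has yielded or left, does its work,
// then releases the set again.
#include "gc/shared/suspendibleThreadSet.hpp"

void example_pause_suspendible_threads() {   // hypothetical helper
  SuspendibleThreadSet::synchronize();       // blocks until the set is quiesced
  // ... work that must not race with suspendible concurrent GC threads ...
  SuspendibleThreadSet::desynchronize();     // wakes yielded threads, allows new joins
}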
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp	Wed Oct 18 22:10:28 2017 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+#define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+
+#include "memory/allocation.hpp"
+
+// A SuspendibleThreadSet is a set of threads that can be suspended.
+// A thread can join and later leave the set, and periodically yield.
+// If some thread (not in the set) requests, via synchronize(), that
+// the threads be suspended, then the requesting thread is blocked
+// until all the threads in the set have yielded or left the set. Threads
+// may not enter the set when an attempted suspension is in progress. The
+// suspending thread later calls desynchronize(), allowing the suspended
+// threads to continue.
+class SuspendibleThreadSet : public AllStatic {
+  friend class SuspendibleThreadSetJoiner;
+  friend class SuspendibleThreadSetLeaver;
+
+private:
+  static uint   _nthreads;
+  static uint   _nthreads_stopped;
+  static bool   _suspend_all;
+  static double _suspend_all_start;
+
+  static bool is_synchronized();
+
+  // Add the current thread to the set. May block if a suspension is in progress.
+  static void join();
+
+  // Removes the current thread from the set.
+  static void leave();
+
+public:
+  // Returns true if an suspension is in progress.
+  static bool should_yield() { return _suspend_all; }
+
+  // Suspends the current thread if a suspension is in progress.
+  static void yield();
+
+  // Returns when all threads in the set are suspended.
+  static void synchronize();
+
+  // Resumes all suspended threads in the set.
+  static void desynchronize();
+};
+
+class SuspendibleThreadSetJoiner : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+
+  ~SuspendibleThreadSetJoiner() {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  bool should_yield() {
+    if (_active) {
+      return SuspendibleThreadSet::should_yield();
+    } else {
+      return false;
+    }
+  }
+
+  void yield() {
+    assert(_active, "Thread has not joined the suspendible thread set");
+    SuspendibleThreadSet::yield();
+  }
+};
+
+class SuspendibleThreadSetLeaver : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  ~SuspendibleThreadSetLeaver() {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+};
+
+#endif // SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
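
On the worker side, a concurrent GC thread typically wraps its work loop in the RAII helper declared above and polls for pause requests. A minimal sketch, with the loop condition and work function as hypothetical placeholders:

// Illustrative sketch, not part of this changeset.
#include "gc/shared/suspendibleThreadSet.hpp"

void example_concurrent_worker_loop() {
  SuspendibleThreadSetJoiner sts_join;   // joins the set here, leaves on scope exit
  while (example_has_more_work()) {      // hypothetical loop condition
    if (sts_join.should_yield()) {
      sts_join.yield();                  // parks while a synchronize() is pending
    }
    example_do_one_step();               // hypothetical unit of concurrent work
  }
}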
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/jniHandles.hpp"
+
+void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete) {
+  JNIHandles::weak_oops_do(is_alive, keep_alive);
+  JvmtiExport::weak_oops_do(is_alive, keep_alive);
+
+  if (complete != NULL) {
+    complete->do_void();
+  }
+}
+
+void WeakProcessor::oops_do(OopClosure* closure) {
+  AlwaysTrueClosure always_true;
+  weak_oops_do(&always_true, closure, NULL);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.hpp	Wed Oct 18 22:10:28 2017 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+#define SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+// Helper class to aid in root scanning and cleaning of weak oops in the VM.
+//
+// New containers of weak oops added to this class will automatically
+// be cleaned by all GCs, including the young generation GCs.
+class WeakProcessor : AllStatic {
+public:
+  // Visit all oop*s and apply the keep_alive closure if the referenced
+  // object is considered alive by the is_alive closure, otherwise do some
+  // container specific cleanup of element holding the oop.
+  //
+  // The complete closure is used as a post-processing step,
+  // called after all container have been processed.
+  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete = NULL);
+
+  // Visit all oop*s and apply the given closure.
+  static void oops_do(OopClosure* closure);
+};
+
+#endif // SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
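
The call sites added throughout this changeset all follow the same shape: a collector hands its own liveness and keep-alive closures to WeakProcessor instead of iterating JNI weak handles and JVMTI tables itself. A minimal sketch of both entry points, with the closure arguments standing in for whatever a particular collector supplies:

// Illustrative sketch, mirroring the call sites added in this changeset.
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.hpp"

void example_weak_processing(BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
  // Marking/evacuation: clear dead weak oops, keep the live ones alive.
  WeakProcessor::weak_oops_do(is_alive, keep_alive, complete_gc);
}

void example_adjust_weak_roots(OopClosure* adjust_pointer_closure) {
  // Full-GC adjust phase: visit every remaining weak oop unconditionally.
  WeakProcessor::oops_do(adjust_pointer_closure);
}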
--- a/src/hotspot/share/runtime/jniHandles.cpp	Wed Oct 18 21:05:09 2017 +0000
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Wed Oct 18 22:10:28 2017 +0000
@@ -27,7 +27,6 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -424,12 +423,6 @@
       break;
     }
   }
-
-  /*
-   * JVMTI data structures may also contain weak oops.  The iteration of them
-   * is placed here so that we don't need to add it to each of the collectors.
-   */
-  JvmtiExport::weak_oops_do(is_alive, f);
 }