8138762: Refactor setup of evacuation closures in G1
author mgerdin
Wed, 14 Oct 2015 14:50:43 +0200
changeset 33213 b937f634f56e
parent 33212 906b3d079b13
child 33218 32b706c7c6a0
8138762: Refactor setup of evacuation closures in G1
Summary: Introduce policy class containing the root scan closures.
Reviewed-by: ehelin, stefank
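
For readers skimming the patch, here is a minimal standalone sketch of the policy-object pattern this change introduces (the type and function names below are illustrative only, not the patch's real types): the evacuation code asks a factory for one closure-set object chosen from the collector state, instead of every GC task hand-wiring the right combination of scan/mark closures.

#include <iostream>

// Stand-ins for the real closure types; everything here is hypothetical.
struct RootClosures {
  virtual ~RootClosures() {}
  virtual const char* strong_oops() = 0;  // stands in for OopClosure*
  virtual bool trace_metadata() = 0;
};

struct EvacuationClosures : public RootClosures {
  const char* strong_oops() { return "scan-only"; }
  bool trace_metadata()     { return false; }
};

struct InitialMarkClosures : public RootClosures {
  const char* strong_oops() { return "scan-and-mark"; }
  bool trace_metadata()     { return true; }
};

// Mirrors the shape of G1EvacuationRootClosures::create_root_closures():
// the decision is made once, up front, based on collector state.
RootClosures* create_root_closures(bool during_initial_mark) {
  if (during_initial_mark) {
    return new InitialMarkClosures();
  }
  return new EvacuationClosures();
}

int main() {
  RootClosures* c = create_root_closures(/* during_initial_mark */ true);
  std::cout << c->strong_oops() << " trace_metadata=" << c->trace_metadata() << std::endl;
  delete c;
  return 0;
}
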
hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1OopClosures.cpp
hotspot/src/share/vm/gc/g1/g1OopClosures.hpp
hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp
hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp
hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp
hotspot/src/share/vm/gc/g1/g1RemSet.cpp
hotspot/src/share/vm/gc/g1/g1RemSet.hpp
hotspot/src/share/vm/gc/g1/g1RootClosures.cpp
hotspot/src/share/vm/gc/g1/g1RootClosures.hpp
hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp
hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp
--- a/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -22,6 +22,9 @@
  *
  */
 
+#ifndef SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+#define SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "memory/iterator.hpp"
 
@@ -53,3 +56,6 @@
 
   void do_code_blob(CodeBlob* cb);
 };
+
+#endif // SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -44,6 +44,7 @@
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
@@ -4138,80 +4139,6 @@
   }
 }
 
-void G1ParCopyHelper::mark_object(oop obj) {
-  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-}
-
-void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-  assert(from_obj->is_forwarded(), "from obj should be forwarded");
-  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
-  assert(from_obj != to_obj, "should not be self-forwarded");
-
-  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
-  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // The object might be in the process of being copied by another
-  // worker so we cannot trust that its to-space image is
-  // well-formed. So we have to read its size from its from-space
-  // image which we know should not be changing.
-  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
-}
-
-template <class T>
-void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
-  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
-    _scanned_klass->record_modified_oops();
-  }
-}
-
-template <G1Barrier barrier, G1Mark do_mark_object>
-template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-
-  if (oopDesc::is_null(heap_oop)) {
-    return;
-  }
-
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-
-  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
-
-  const InCSetState state = _g1->in_cset_state(obj);
-  if (state.is_in_cset()) {
-    oop forwardee;
-    markOop m = obj->mark();
-    if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
-    } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
-    }
-    assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object != G1MarkNone && forwardee != obj) {
-      // If the object is self-forwarded we don't need to explicitly
-      // mark it, the evacuation failure protocol will do so.
-      mark_forwarded_object(obj, forwardee);
-    }
-
-    if (barrier == G1BarrierKlass) {
-      do_klass_barrier(p, forwardee);
-    }
-  } else {
-    if (state.is_humongous()) {
-      _g1->set_humongous_is_live(obj);
-    }
-    // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause then attempt to mark the object.
-    if (do_mark_object == G1MarkFromRoot) {
-      mark_object(obj);
-    }
-  }
-}
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 private:
   double _start_term;
@@ -4264,32 +4191,6 @@
   } while (!offer_termination());
 }
 
-class G1KlassScanClosure : public KlassClosure {
- G1ParCopyHelper* _closure;
- bool             _process_only_dirty;
- int              _count;
- public:
-  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
-      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
-  void do_klass(Klass* klass) {
-    // If the klass has not been dirtied we know that there's
-    // no references into  the young gen and we can skip it.
-   if (!_process_only_dirty || klass->has_modified_oops()) {
-      // Clean the klass since we're going to scavenge all the metadata.
-      klass->clear_modified_oops();
-
-      // Tell the closure that this klass is the Klass to scavenge
-      // and is the one to dirty if oops are left pointing into the young gen.
-      _closure->set_scanned_klass(klass);
-
-      klass->oops_do(_closure);
-
-      _closure->set_scanned_klass(NULL);
-    }
-    _count++;
-  }
-};
-
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap*         _g1h;
@@ -4310,42 +4211,6 @@
       _n_workers(n_workers)
   {}
 
-  RefToScanQueueSet* queues() { return _queues; }
-
-  RefToScanQueue *work_queue(int i) {
-    return queues()->queue(i);
-  }
-
-  ParallelTaskTerminator* terminator() { return &_terminator; }
-
-  // Helps out with CLD processing.
-  //
-  // During InitialMark we need to:
-  // 1) Scavenge all CLDs for the young GC.
-  // 2) Mark all objects directly reachable from strong CLDs.
-  template <G1Mark do_mark_object>
-  class G1CLDClosure : public CLDClosure {
-    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
-    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
-    G1KlassScanClosure                                _klass_in_cld_closure;
-    bool                                              _claim;
-
-   public:
-    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
-                 bool only_young, bool claim)
-        : _oop_closure(oop_closure),
-          _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss()),
-          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
-          _claim(claim) {
-
-    }
-
-    void do_cld(ClassLoaderData* cld) {
-      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
-    }
-  };
-
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
 
@@ -4361,62 +4226,18 @@
       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
       pss->set_ref_processor(rp);
 
-      bool only_young = _g1h->collector_state()->gcs_are_young();
-
-      // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
-                                                                               only_young, // Only process dirty klasses.
-                                                                               false);     // No need to claim CLDs.
-      // IM young GC.
-      //    Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
-                                                                               false, // Process all klasses.
-                                                                               true); // Need to claim CLDs.
-      //    Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
-                                                                                    false, // Process all klasses.
-                                                                                    true); // Need to claim CLDs.
-
-      OopClosure* strong_root_cl;
-      OopClosure* weak_root_cl;
-      CLDClosure* strong_cld_cl;
-      CLDClosure* weak_cld_cl;
-
-      bool trace_metadata = false;
-
-      if (_g1h->collector_state()->during_initial_mark_pause()) {
-        // We also need to mark copied objects.
-        strong_root_cl = &scan_mark_root_cl;
-        strong_cld_cl  = &scan_mark_cld_cl;
-        if (ClassUnloadingWithConcurrentMark) {
-          weak_root_cl = &scan_mark_weak_root_cl;
-          weak_cld_cl  = &scan_mark_weak_cld_cl;
-          trace_metadata = true;
-        } else {
-          weak_root_cl = &scan_mark_root_cl;
-          weak_cld_cl  = &scan_mark_cld_cl;
-        }
-      } else {
-        strong_root_cl = &scan_only_root_cl;
-        weak_root_cl   = &scan_only_root_cl;
-        strong_cld_cl  = &scan_only_cld_cl;
-        weak_cld_cl    = &scan_only_cld_cl;
-      }
-
       double start_strong_roots_sec = os::elapsedTime();
-      _root_processor->evacuate_roots(strong_root_cl,
-                                      weak_root_cl,
-                                      strong_cld_cl,
-                                      weak_cld_cl,
-                                      trace_metadata,
-                                      worker_id);
+
+      _root_processor->evacuate_roots(pss->closures(), worker_id);
 
       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
+
+      // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
+      // treating the visited nmethods as roots for concurrent marking.
+      // We only want to make sure that the oops in the nmethods are adjusted with regard to the
+      // objects copied by the current evacuation.
       size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
-                                                                             weak_root_cl,
+                                                                             pss->closures()->weak_codeblobs(),
                                                                              worker_id);
 
       _pss->add_cards_scanned(worker_id, cards_scanned);
@@ -5077,19 +4898,8 @@
     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
     pss->set_ref_processor(NULL);
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Keep alive closure.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
 
     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
@@ -5177,23 +4987,12 @@
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Is alive closure
     G1AlwaysAliveClosure always_alive(_g1h);
 
     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
@@ -5283,23 +5082,8 @@
   pss->set_ref_processor(NULL);
   assert(pss->queue_is_empty(), "pre-condition");
 
-  // We do not embed a reference processor in the copying/scanning
-  // closures while we're actually processing the discovered
-  // reference objects.
-
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
-
-  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-  if (collector_state()->during_initial_mark_pause()) {
-    // We also need to mark copied objects.
-    copy_non_heap_cl = &copy_mark_non_heap_cl;
-  }
-
   // Keep alive closure.
-  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
+  G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
 
   // Serial Complete GC closure
   G1STWDrainQueueClosure drain_queue(this, pss);
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -58,5 +58,23 @@
          "The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads);
 }
 
+void G1KlassScanClosure::do_klass(Klass* klass) {
+  // If the klass has not been dirtied we know that there are
+  // no references into the young gen and we can skip it.
+  if (!_process_only_dirty || klass->has_modified_oops()) {
+    // Clean the klass since we're going to scavenge all the metadata.
+    klass->clear_modified_oops();
+
+    // Tell the closure that this klass is the Klass to scavenge
+    // and is the one to dirty if oops are left pointing into the young gen.
+    _closure->set_scanned_klass(klass);
+
+    klass->oops_do(_closure);
+
+    _closure->set_scanned_klass(NULL);
+  }
+  _count++;
+}
+
 // Generate G1 specialized oop_oop_iterate functions.
 SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
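
The do_klass logic moved into this file implements a simple dirty-flag filter: during young GCs only klasses whose has_modified_oops() flag is set can hold references into the young gen, so clean klasses are skipped entirely. A hedged standalone sketch of that filtering protocol (FakeKlass and the counter are invented for illustration; the counter stands in for klass->oops_do(closure)):

#include <iostream>

struct FakeKlass {
  bool modified_oops;  // stands in for has_modified_oops()
  int  scanned;
};

struct KlassScan {
  bool process_only_dirty;
  void do_klass(FakeKlass* k) {
    // Skip klasses known to hold no references into the young gen.
    if (!process_only_dirty || k->modified_oops) {
      k->modified_oops = false;  // clean it: we scavenge all its metadata now
      k->scanned++;              // stands in for klass->oops_do(closure)
    }
  }
};

int main() {
  FakeKlass dirty = {true, 0};
  FakeKlass clean = {false, 0};
  KlassScan scan = {true};       // young GC: only process dirty klasses
  scan.do_klass(&dirty);
  scan.do_klass(&clean);
  std::cout << dirty.scanned << " " << clean.scanned << std::endl;  // prints "1 0"
  return 0;
}
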
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -94,18 +94,18 @@
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
-  void mark_object(oop obj);
+  inline void mark_object(oop obj);
 
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
-  void mark_forwarded_object(oop from_obj, oop to_obj);
+  inline void mark_forwarded_object(oop from_obj, oop to_obj);
  public:
   G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state);
   G1ParCopyHelper(G1CollectedHeap* g1);
 
   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
+  template <class T> inline void do_klass_barrier(T* p, oop new_obj);
 };
 
 enum G1Barrier {
@@ -137,16 +137,17 @@
   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-  G1CollectedHeap*      g1()  { return _g1; };
-  G1ParScanThreadState* pss() { return _par_scan_state; }
 };
 
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
-// We use a separate closure to handle references during evacuation
-// failure processing.
+class G1KlassScanClosure : public KlassClosure {
+  G1ParCopyHelper* _closure;
+  bool             _process_only_dirty;
+  int              _count;
+ public:
+  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
+      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
+  void do_klass(Klass* klass);
+};
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
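
The header changes above follow HotSpot's .hpp/.inline.hpp convention: the class header only declares the hot functions as inline, and their bodies move to a separate .inline.hpp that only the translation units actually calling them include. A schematic sketch of the split, with hypothetical file names collapsed into one listing:

// foo.hpp -- cheap to include everywhere; declaration only.
class Foo {
 public:
  inline void bar();            // body lives in foo.inline.hpp
};

// foo.inline.hpp -- included only where bar() is actually called,
// keeping the definition out of widely-included headers.
inline void Foo::bar() {
  // hot-path body
}

// some .cpp file that needs the fast path includes foo.inline.hpp
int main() {
  Foo f;
  f.bar();
  return 0;
}
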
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -225,4 +225,78 @@
   }
 }
 
+template <class T>
+void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
+  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+    _scanned_klass->record_modified_oops();
+  }
+}
+
+void G1ParCopyHelper::mark_object(oop obj) {
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // We know that the object is not moving so it's safe to read its size.
+  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+}
+
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
+  assert(from_obj->is_forwarded(), "from obj should be forwarded");
+  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
+  assert(from_obj != to_obj, "should not be self-forwarded");
+
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // The object might be in the process of being copied by another
+  // worker so we cannot trust that its to-space image is
+  // well-formed. So we have to read its size from its from-space
+  // image which we know should not be changing.
+  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
+}
+
+template <G1Barrier barrier, G1Mark do_mark_object>
+template <class T>
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (oopDesc::is_null(heap_oop)) {
+    return;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
+
+  const InCSetState state = _g1->in_cset_state(obj);
+  if (state.is_in_cset()) {
+    oop forwardee;
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
+    } else {
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
+      // If the object is self-forwarded we don't need to explicitly
+      // mark it, the evacuation failure protocol will do so.
+      mark_forwarded_object(obj, forwardee);
+    }
+
+    if (barrier == G1BarrierKlass) {
+      do_klass_barrier(p, forwardee);
+    }
+  } else {
+    if (state.is_humongous()) {
+      _g1->set_humongous_is_live(obj);
+    }
+    // The object is not in collection set. If we're a root scanning
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
+      mark_object(obj);
+    }
+  }
+}
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -27,6 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,7 +38,7 @@
     _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
     _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
+    _closures(NULL),
     _hash_seed(17),
     _worker_id(worker_id),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
@@ -69,6 +70,8 @@
   // need to be moved to the next space.
   _dest[InCSetState::Young]        = InCSetState::Old;
   _dest[InCSetState::Old]          = InCSetState::Old;
+
+  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
 }
 
 // Pass locally gathered statistics to global state.
@@ -86,6 +89,7 @@
 
 G1ParScanThreadState::~G1ParScanThreadState() {
   delete _plab_allocator;
+  delete _closures;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 }
 
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -36,6 +36,7 @@
 #include "oops/oop.hpp"
 
 class G1PLABAllocator;
+class G1EvacuationRootClosures;
 class HeapRegion;
 class outputStream;
 
@@ -45,7 +46,7 @@
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
   G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet*         _g1_rem;
+  G1EvacuationRootClosures* _closures;
 
   G1PLABAllocator*  _plab_allocator;
 
@@ -109,6 +110,7 @@
     }
   }
 
+  G1EvacuationRootClosures* closures() { return _closures; }
   uint worker_id() { return _worker_id; }
 
   // Returns the current amount of waste due to alignment or not being able to fit
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -26,7 +26,6 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
-#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -228,15 +227,13 @@
 };
 
 size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                        OopClosure* non_heap_roots,
+                        CodeBlobClosure* heap_region_codeblobs,
                         uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
-  G1CodeBlobClosure code_root_cl(non_heap_roots);
-
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
+  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);
 
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
@@ -303,7 +300,7 @@
 }
 
 size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
-                                             OopClosure* non_heap_roots,
+                                             CodeBlobClosure* heap_region_codeblobs,
                                              uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -326,7 +323,7 @@
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   updateRS(&into_cset_dcq, worker_i);
-  size_t cards_scanned = scanRS(oc, non_heap_roots, worker_i);
+  size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);
 
   // We now clear the cached values of _cset_rs_update_cl for this worker
   _cset_rs_update_cl[worker_i] = NULL;
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -95,7 +95,7 @@
   // Returns the number of cards scanned while looking for pointers
   // into the collection set.
   size_t oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
-                                     OopClosure* non_heap_roots,
+                                     CodeBlobClosure* heap_region_codeblobs,
                                      uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -107,7 +107,7 @@
   void cleanup_after_oops_into_collection_set_do();
 
   size_t scanRS(G1ParPushHeapRSClosure* oc,
-                OopClosure* non_heap_roots,
+                CodeBlobClosure* heap_region_codeblobs,
                 uint worker_i);
 
   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootClosures.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
+
+class G1ParScanThreadState;
+
+// Simple holder object for a complete set of closures used by the G1 evacuation code.
+template <G1Mark Mark>
+class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+public:
+  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
+  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
+  G1KlassScanClosure                     _klass_in_cld_closure;
+  CLDToKlassAndOopClosure                _clds;
+  G1CodeBlobClosure                      _codeblobs;
+  BufferingOopClosure                    _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+    _oops(g1h, pss),
+    _oop_in_klass(g1h, pss),
+    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
+    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _codeblobs(&_oops),
+    _buffered_oops(&_oops) {}
+};
+
+class G1EvacuationClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkNone> _closures;
+
+public:
+  G1EvacuationClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss,
+                       bool gcs_are_young) :
+      _closures(g1h, pss, /* process_only_dirty_klasses */ gcs_are_young, /* must_claim_cld */ false) {}
+
+  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
+  OopClosure* strong_oops() { return &_closures._buffered_oops; }
+
+  CLDClosure* weak_clds()             { return &_closures._clds; }
+  CLDClosure* strong_clds()           { return &_closures._clds; }
+  CLDClosure* thread_root_clds()      { return NULL; }
+  CLDClosure* second_pass_weak_clds() { return NULL; }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
+
+  void flush()                 { _closures._buffered_oops.done(); }
+  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
+
+  OopClosure* raw_strong_oops() { return &_closures._oops; }
+
+  bool trace_metadata()         { return false; }
+};
+
+// Closures used during initial mark.
+// The treatment of "weak" roots is selectable through the template parameter;
+// this is usually used to control unloading of classes and interned strings.
+template <G1Mark MarkWeak>
+class G1InitialMarkClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkFromRoot> _strong;
+  G1SharedClosures<MarkWeak>       _weak;
+
+  // Filter method to help with returning the appropriate closures
+  // depending on the class template parameter.
+  template <G1Mark Mark, typename T>
+  T* null_if(T* t) {
+    if (Mark == MarkWeak) {
+      return NULL;
+    }
+    return t;
+  }
+
+public:
+  G1InitialMarkClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss) :
+      _strong(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
+      _weak(g1h, pss,   /* process_only_dirty_klasses */ false, /* must_claim_cld */ true) {}
+
+  OopClosure* weak_oops()   { return &_weak._buffered_oops; }
+  OopClosure* strong_oops() { return &_strong._buffered_oops; }
+
+  // If MarkWeak is G1MarkPromotedFromRoot then the weak CLDs must be processed in a second pass.
+  CLDClosure* weak_clds()             { return null_if<G1MarkPromotedFromRoot>(&_weak._clds); }
+  CLDClosure* strong_clds()           { return &_strong._clds; }
+
+  // If MarkWeak is G1MarkFromRoot then all CLDs are processed by the weak and strong variants,
+  // so return a NULL closure for the following specialized versions in that case.
+  CLDClosure* thread_root_clds()      { return null_if<G1MarkFromRoot>(&_strong._clds); }
+  CLDClosure* second_pass_weak_clds() { return null_if<G1MarkFromRoot>(&_weak._clds); }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_strong._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_weak._codeblobs; }
+
+  void flush() {
+    _strong._buffered_oops.done();
+    _weak._buffered_oops.done();
+  }
+
+  double closure_app_seconds() {
+    return _strong._buffered_oops.closure_app_seconds() +
+           _weak._buffered_oops.closure_app_seconds();
+  }
+
+  OopClosure* raw_strong_oops() { return &_strong._oops; }
+
+  // If we are not marking all weak roots then we are tracing
+  // which metadata is alive.
+  bool trace_metadata()         { return MarkWeak == G1MarkPromotedFromRoot; }
+};
+
+G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
+  if (g1h->collector_state()->during_initial_mark_pause()) {
+    if (ClassUnloadingWithConcurrentMark) {
+      return new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
+    } else {
+      return new G1InitialMarkClosures<G1MarkFromRoot>(g1h, pss);
+    }
+  } else {
+    return new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
+  }
+}
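
The null_if helper above is worth a second look: per instantiation of the class template it decides, at compile time, whether a given accessor hands out a real closure or NULL. A minimal standalone illustration of the same trick (names are invented; only the shape matches the patch):

#include <cstdio>

enum Mark { MarkFromRoot, MarkPromotedFromRoot };

template <Mark MarkWeak>
struct Closures {
  int weak_clds;  // stands in for the weak CLD closure

  // Return NULL when the class's MarkWeak parameter equals the Mark
  // named at the call site; otherwise pass the pointer through.
  template <Mark M, typename T>
  T* null_if(T* t) {
    if (M == MarkWeak) {
      return NULL;
    }
    return t;
  }

  // A second weak-CLD pass is only needed in the promoted-from-root case.
  int* second_pass_weak_clds() {
    return null_if<MarkFromRoot>(&weak_clds);
  }
};

int main() {
  Closures<MarkFromRoot>         a = {0};
  Closures<MarkPromotedFromRoot> b = {0};
  std::printf("%d %d\n",
              a.second_pass_weak_clds() != NULL,   // 0: filtered to NULL
              b.second_pass_weak_clds() != NULL);  // 1: real pointer
  return 0;
}
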
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootClosures.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1ROOTCLOSURES_HPP
+#define SHARE_VM_GC_G1_G1ROOTCLOSURES_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class G1CollectedHeap;
+class G1ParScanThreadState;
+
+class G1RootClosures : public CHeapObj<mtGC> {
+public:
+  // Closures to process raw oops in the root set.
+  virtual OopClosure* weak_oops() = 0;
+  virtual OopClosure* strong_oops() = 0;
+
+  // Closures to process CLDs in the root set.
+  virtual CLDClosure* weak_clds() = 0;
+  virtual CLDClosure* strong_clds() = 0;
+
+  // Applied to the CLDs reachable from the thread stacks.
+  virtual CLDClosure* thread_root_clds() = 0;
+
+  // Applied to code blobs reachable as strong roots.
+  virtual CodeBlobClosure* strong_codeblobs() = 0;
+};
+
+class G1EvacuationRootClosures : public G1RootClosures {
+public:
+  // Flush any buffered state and deferred processing.
+  virtual void flush() = 0;
+  virtual double closure_app_seconds() = 0;
+
+  // Applied to the weakly reachable CLDs when all strongly reachable
+  // CLDs are guaranteed to have been processed.
+  virtual CLDClosure* second_pass_weak_clds() = 0;
+
+  // Get a raw oop closure for processing oops, bypassing the flushing above.
+  virtual OopClosure* raw_strong_oops() = 0;
+
+  // Applied to code blobs treated as weak roots.
+  virtual CodeBlobClosure* weak_codeblobs() = 0;
+
+  // Is this closure used for tracing metadata?
+  virtual bool trace_metadata() = 0;
+
+  static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
+};
+
+#endif // SHARE_VM_GC_G1_G1ROOTCLOSURES_HPP
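
A hedged sketch of how a caller consumes the evacuation-specific half of this interface (names invented): root processing pushes oops through the buffered closures, then flush() drains the buffer and closure_app_seconds() reports the time spent applying them, which G1 attributes to object-copy time rather than to the individual root phases.

#include <cstdio>

struct EvacClosures {
  long   buffer[16];
  int    count;
  double app_seconds;

  EvacClosures() : count(0), app_seconds(0.0) {}

  // Stands in for the buffered strong/weak oop closures: enqueue only.
  void apply_root(long oop_addr) { buffer[count++] = oop_addr; }

  // Drain the buffer in one batch; the measured cost is what
  // closure_app_seconds() later reports.
  void flush() {
    for (int i = 0; i < count; i++) {
      // process buffer[i] (copy the object, update the root, ...)
    }
    app_seconds += 0.001;  // placeholder for a real os::elapsedTime() delta
    count = 0;
  }

  double closure_app_seconds() { return app_seconds; }
};

int main() {
  EvacClosures c;
  c.apply_root(0x1000);
  c.apply_root(0x2000);
  c.flush();  // cost "attributed as object copy time"
  std::printf("obj-copy seconds: %.3f\n", c.closure_app_seconds());
  return 0;
}
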
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Oct 14 14:50:43 2015 +0200
@@ -33,7 +33,7 @@
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
@@ -70,40 +70,19 @@
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
     _n_workers_discovered_strong_classes(0) {}
 
-void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
-                                     OopClosure* scan_non_heap_weak_roots,
-                                     CLDClosure* scan_strong_clds,
-                                     CLDClosure* scan_weak_clds,
-                                     bool trace_metadata,
-                                     uint worker_i) {
-  // First scan the shared roots.
+void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
   double ext_roots_start = os::elapsedTime();
   G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
 
-  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
-
-  OopClosure* const weak_roots = &buf_scan_non_heap_weak_roots;
-  OopClosure* const strong_roots = &buf_scan_non_heap_roots;
-
-  // CodeBlobClosures are not interoperable with BufferingOopClosures
-  G1CodeBlobClosure root_code_blobs(scan_non_heap_roots);
-
-  process_java_roots(strong_roots,
-                     trace_metadata ? scan_strong_clds : NULL,
-                     scan_strong_clds,
-                     trace_metadata ? NULL : scan_weak_clds,
-                     &root_code_blobs,
-                     phase_times,
-                     worker_i);
+  process_java_roots(closures, phase_times, worker_i);
 
   // This is the point where this worker thread will not find more strong CLDs/nmethods.
   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     worker_has_discovered_all_strong_classes();
   }
 
-  process_vm_roots(strong_roots, weak_roots, phase_times, worker_i);
+  process_vm_roots(closures, phase_times, worker_i);
 
   {
     // Now the CM ref_processor roots.
@@ -113,11 +92,11 @@
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
       // until they can be processed at the end of marking.
-      _g1h->ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
+      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
     }
   }
 
-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     {
       G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
       // Barrier to make sure all workers passed
@@ -127,18 +106,18 @@
 
     // Now take the complement of the strong CLDs.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
-    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+    assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
+    ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
   } else {
     phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
     phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
+    assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
-  buf_scan_non_heap_roots.done();
-  buf_scan_non_heap_weak_roots.done();
+  closures->flush();
 
-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
-      + buf_scan_non_heap_weak_roots.closure_app_seconds();
+  double obj_copy_time_sec = closures->closure_app_seconds();
 
   phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
 
@@ -159,22 +138,68 @@
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
+// Adaptor to pass the closures to the strong roots in the VM.
+class StrongRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+  CodeBlobClosure* _blobs;
+public:
+  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
+      _roots(roots), _clds(clds), _blobs(blobs) {}
+
+  OopClosure* weak_oops()   { return NULL; }
+  OopClosure* strong_oops() { return _roots; }
+
+  CLDClosure* weak_clds()        { return NULL; }
+  CLDClosure* strong_clds()      { return _clds; }
+  CLDClosure* thread_root_clds() { return _clds; }
+
+  CodeBlobClosure* strong_codeblobs() { return _blobs; }
+};
+
 void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                            CLDClosure* clds,
                                            CodeBlobClosure* blobs) {
+  StrongRootsClosures closures(oops, clds, blobs);
 
-  process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
-  process_vm_roots(oops, NULL, NULL, 0);
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);
 
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
+// Adaptor to pass the closures to all the roots in the VM.
+class AllRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+public:
+  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
+      _roots(roots), _clds(clds) {}
+
+  OopClosure* weak_oops() { return _roots; }
+  OopClosure* strong_oops() { return _roots; }
+
+  // By returning the same CLDClosure for both weak and strong CLDs we ensure
+  // that a single walk of the CLDG will invoke the closure on all CLDs in the
+  // system.
+  CLDClosure* weak_clds() { return _clds; }
+  CLDClosure* strong_clds() { return _clds; }
+  // We don't want to visit CLDs more than once, so we return NULL for the
+  // thread root CLDs.
+  CLDClosure* thread_root_clds() { return NULL; }
+
+  // We don't want to visit code blobs more than once, so we return NULL for the
+  // strong case and walk the entire code cache as a separate step.
+  CodeBlobClosure* strong_codeblobs() { return NULL; }
+};
+
 void G1RootProcessor::process_all_roots(OopClosure* oops,
                                         CLDClosure* clds,
                                         CodeBlobClosure* blobs) {
+  AllRootsClosures closures(oops, clds);
 
-  process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
-  process_vm_roots(oops, oops, NULL, 0);
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);
 
   if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(blobs);
@@ -183,35 +208,36 @@
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
-void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
-                                         CLDClosure* thread_stack_clds,
-                                         CLDClosure* strong_clds,
-                                         CLDClosure* weak_clds,
-                                         CodeBlobClosure* strong_code,
+void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                          G1GCPhaseTimes* phase_times,
                                          uint worker_i) {
-  assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
+  assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between those, only one may be set");
   // Iterating over the CLDG and the Threads are done early to allow us to
   // first process the strong CLDs and nmethods and then, after a barrier,
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
-      ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
+      ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
     bool is_par = n_workers() > 1;
-    Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
+    Threads::possibly_parallel_oops_do(is_par,
+                                       closures->strong_oops(),
+                                       closures->thread_root_clds(),
+                                       closures->strong_codeblobs());
   }
 }
 
-void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
-                                       OopClosure* weak_roots,
+void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                        G1GCPhaseTimes* phase_times,
                                        uint worker_i) {
+  OopClosure* strong_roots = closures->strong_oops();
+  OopClosure* weak_roots = closures->weak_oops();
+
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
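
The two adaptors added above (StrongRootsClosures and AllRootsClosures) encode the visiting policy purely in which pointers they hand back. A small standalone sketch of that idea (toy types; the two-CLD "graph" is invented):

#include <cstdio>

struct CLDClosure { int applied; };

// Stands in for ClassLoaderDataGraph::roots_cld_do(strong, weak):
// one strong CLD and one weak CLD in this toy graph.
void roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
  if (strong != NULL) strong->applied++;  // visit the strong CLD
  if (weak   != NULL) weak->applied++;    // visit the weak CLD
}

int main() {
  // AllRootsClosures: the same closure for both strengths, so one
  // walk of the graph visits every CLD.
  CLDClosure all = {0};
  roots_cld_do(&all, &all);
  std::printf("all roots visited: %d\n", all.applied);            // 2

  // StrongRootsClosures: weak CLDs are deliberately skipped by
  // returning NULL for the weak slot.
  CLDClosure strong_only = {0};
  roots_cld_do(&strong_only, NULL);
  std::printf("strong-only visited: %d\n", strong_only.applied);  // 1
  return 0;
}
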
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Wed Oct 14 09:33:45 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Wed Oct 14 14:50:43 2015 +0200
@@ -32,8 +32,10 @@
 class CLDClosure;
 class CodeBlobClosure;
 class G1CollectedHeap;
+class G1EvacuationRootClosures;
 class G1GCPhaseTimes;
 class G1ParPushHeapRSClosure;
+class G1RootClosures;
 class Monitor;
 class OopClosure;
 class SubTasksDone;
@@ -71,16 +73,11 @@
   void worker_has_discovered_all_strong_classes();
   void wait_until_all_strong_classes_discovered();
 
-  void process_java_roots(OopClosure* scan_non_heap_roots,
-                          CLDClosure* thread_stack_clds,
-                          CLDClosure* scan_strong_clds,
-                          CLDClosure* scan_weak_clds,
-                          CodeBlobClosure* scan_strong_code,
+  void process_java_roots(G1RootClosures* closures,
                           G1GCPhaseTimes* phase_times,
                           uint worker_i);
 
-  void process_vm_roots(OopClosure* scan_non_heap_roots,
-                        OopClosure* scan_non_heap_weak_roots,
+  void process_vm_roots(G1RootClosures* closures,
                         G1GCPhaseTimes* phase_times,
                         uint worker_i);
 
@@ -90,12 +87,7 @@
   // Apply closures to the strongly and weakly reachable roots in the system
   // in a single pass.
   // Record and report timing measurements for sub phases using the worker_i
-  void evacuate_roots(OopClosure* scan_non_heap_roots,
-                      OopClosure* scan_non_heap_weak_roots,
-                      CLDClosure* scan_strong_clds,
-                      CLDClosure* scan_weak_clds,
-                      bool trace_metadata,
-                      uint worker_i);
+  void evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i);
 
   // Apply oops, clds and blobs to all strongly reachable roots in the system
   void process_strong_roots(OopClosure* oops,