8062943: REDO - Parallelize clearing the next mark bitmap
author mlarsson
Tue, 25 Nov 2014 11:59:55 +0100
changeset 27885 7786b3940066
parent 27884 563a811ad398
child 27886 79c703ddca55
8062943: REDO - Parallelize clearing the next mark bitmap
Reviewed-by: kbarrett, tschatzl
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Nov 25 11:59:55 2014 +0100
@@ -180,9 +180,32 @@
   }
 };
 
+class ParClearNextMarkBitmapTask : public AbstractGangTask {
+  ClearBitmapHRClosure* _cl;
+  HeapRegionClaimer     _hrclaimer;
+  bool                  _suspendible; // If the task is suspendible, workers must join the STS.
+
+public:
+  ParClearNextMarkBitmapTask(ClearBitmapHRClosure* cl, uint n_workers, bool suspendible) :
+      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}
+
+  void work(uint worker_id) {
+    if (_suspendible) {
+      SuspendibleThreadSet::join();
+    }
+    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true /* concurrent */);
+    if (_suspendible) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+};
+
 void CMBitMap::clearAll() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
-  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  uint n_workers = g1h->workers()->active_workers();
+  ParClearNextMarkBitmapTask task(&cl, n_workers, false /* suspendible */);
+  g1h->workers()->run_task(&task);
   guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
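The change above replaces a single-threaded region walk with a gang task: every worker scans all regions, but an atomic claim guarantees each region's slice of the bitmap is cleared exactly once. A minimal standalone sketch of the same claim-and-clear idea in portable C++ (Region, par_clear, clear_all and the thread setup are illustrative stand-ins, not HotSpot API):

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <functional>
    #include <thread>
    #include <vector>

    // Illustrative stand-in for a heap region's slice of the mark bitmap.
    struct Region { std::vector<uint8_t> bitmap; };

    // Every worker scans all regions; a CAS ensures each region is cleared
    // by exactly one worker -- the same idea as HeapRegionClaimer.
    void par_clear(std::vector<Region>& regions,
                   std::vector<std::atomic<bool>>& claimed,
                   unsigned worker_id, unsigned n_workers) {
      const size_t n = regions.size();
      // Stagger start indices so workers begin in disjoint parts of the heap
      // and mostly avoid contending on the same claim flags.
      for (size_t i = 0; i < n; i++) {
        size_t index = (n * worker_id / n_workers + i) % n;
        bool expected = false;
        if (claimed[index].compare_exchange_strong(expected, true)) {
          std::fill(regions[index].bitmap.begin(), regions[index].bitmap.end(), 0);
        }
      }
    }

    // Usage: run one par_clear per worker thread, then join the gang.
    void clear_all(std::vector<Region>& regions, unsigned n_workers) {
      std::vector<std::atomic<bool>> claimed(regions.size());
      std::vector<std::thread> gang;
      for (unsigned w = 0; w < n_workers; w++) {
        gang.emplace_back(par_clear, std::ref(regions), std::ref(claimed), w, n_workers);
      }
      for (std::thread& t : gang) t.join();
    }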
@@ -861,7 +884,8 @@
   guarantee(!g1h->mark_in_progress(), "invariant");
 
   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
-  g1h->heap_region_iterate(&cl);
+  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true /* suspendible */);
+  _parallel_workers->run_task(&task);
 
   // Clear the liveness counting data. If the marking has been aborted, the abort()
   // call already did that.
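Passing suspendible = true matters on this path: the bitmap is cleared concurrently with the mutator, so each worker joins the suspendible thread set, allowing a safepoint to bring the whole gang to a stop. A heavily simplified, hypothetical sketch of such a set (HotSpot's real SuspendibleThreadSet additionally supports yielding at safepoints, which ClearBitmapHRClosure's may_yield relies on):

    #include <condition_variable>
    #include <mutex>

    // Hypothetical, much-simplified suspendible set: workers join/leave,
    // and a safepoint initiator can block new joiners and wait for the
    // set to drain before proceeding.
    class SuspendibleSet {
      std::mutex              mu_;
      std::condition_variable cv_;
      int                     joined_  = 0;
      bool                    suspend_ = false;
    public:
      void join() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return !suspend_; });  // don't join during a safepoint
        joined_++;
      }
      void leave() {
        std::lock_guard<std::mutex> lk(mu_);
        joined_--;
        cv_.notify_all();
      }
      void suspend_all() {  // safepoint begins
        std::unique_lock<std::mutex> lk(mu_);
        suspend_ = true;
        cv_.wait(lk, [this] { return joined_ == 0; });  // wait for workers to leave
      }
      void resume_all() {   // safepoint ends
        std::lock_guard<std::mutex> lk(mu_);
        suspend_ = false;
        cv_.notify_all();
      }
    };

This also explains the concurrentMarkThread.cpp hunk below: the scoped SuspendibleThreadSetJoiner around clearNextBitmap() is no longer needed once each gang worker joins and leaves the set inside its own work() method.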
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Tue Nov 25 11:59:55 2014 +0100
@@ -280,7 +280,6 @@
       // We may have aborted just before the remark. Do not bother clearing the
       // bitmap then, as it has been done during mark abort.
       if (!cm()->has_aborted()) {
-        SuspendibleThreadSetJoiner sts;
         _cm->clearNextBitmap();
       } else {
         assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Nov 25 11:59:55 2014 +0100
@@ -2552,8 +2552,9 @@
 void
 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                          uint worker_id,
-                                         HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, worker_id, hrclaimer);
+                                         HeapRegionClaimer* hrclaimer,
+                                         bool concurrent) const {
+  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
 // Clear the cached CSet starting regions and (more importantly)
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Nov 25 11:59:55 2014 +0100
@@ -1380,10 +1380,13 @@
   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
   // to each of the regions, by attempting to claim the region using the
   // HeapRegionClaimer and, if successful, applying the closure to the claimed
-  // region.
+  // region. The concurrent argument should be set to true if the iteration
+  // runs concurrently, in which case no assumptions can be made about the
+  // consistency of heap region attributes (they may be modified while iterating).
   void heap_region_par_iterate(HeapRegionClosure* cl,
                                uint worker_id,
-                               HeapRegionClaimer* hrclaimer) const;
+                               HeapRegionClaimer* hrclaimer,
+                               bool concurrent = false) const;
 
   // Clear the cached cset start regions and (more importantly)
   // the time stamps. Called when we reset the GC time stamp.
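Defaulting concurrent to false keeps every existing safepoint-time caller source-compatible; only the concurrent bitmap-clearing path opts in. Hypothetical call shapes (cl, worker_id and hrclaimer stand for whatever the caller already has in scope):

    // Existing STW callers are unchanged (concurrent defaults to false):
    g1h->heap_region_par_iterate(&cl, worker_id, &hrclaimer);
    // The concurrent bitmap-clearing path opts in explicitly:
    g1h->heap_region_par_iterate(&cl, worker_id, &hrclaimer, true /* concurrent */);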
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Tue Nov 25 11:59:55 2014 +0100
@@ -260,7 +260,7 @@
   return num_regions;
 }
 
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
@@ -279,7 +279,11 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
+    // However, if the iteration is specified as concurrent, the values of
+    // is_starts_humongous and is_continues_humongous cannot be trusted,
+    // and we should just blindly iterate over regions regardless of their
+    // humongous status.
+    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
       continue;
     }
     // OK, try to claim it
@@ -287,7 +291,9 @@
       continue;
     }
     // Success!
-    if (r->is_starts_humongous()) {
+    // As mentioned above, special treatment of humongous regions can only be
+    // done if we are iterating non-concurrently.
+    if (!concurrent && r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
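Putting the two hunks together: under a concurrent iteration every region is claimed and visited individually, while a safepoint iteration skips "continues humongous" regions and processes them from their "starts humongous" region, in that order. A compact, self-contained skeleton of that control flow (Region, Claimer and visit are hypothetical stand-ins for the HotSpot types, not the real par_iterate):

    #include <atomic>
    #include <cstddef>
    #include <vector>

    struct Region { bool starts_humongous = false; bool continues_humongous = false; };
    void visit(Region&) { /* apply the closure here */ }

    // One atomic flag per region, claimed exactly once via CAS
    // (the essence of HeapRegionClaimer).
    class Claimer {
      std::vector<std::atomic<bool>> claimed_;
    public:
      explicit Claimer(size_t n) : claimed_(n) {}
      bool is_claimed(size_t i) const { return claimed_[i].load(); }
      bool claim(size_t i) {
        bool expected = false;
        return claimed_[i].compare_exchange_strong(expected, true);
      }
    };

    void par_iterate(std::vector<Region>& regions, Claimer& claimer,
                     size_t start, bool concurrent) {
      const size_t n = regions.size();
      for (size_t i = 0; i < n; i++) {
        size_t index = (start + i) % n;
        // Concurrently, humongous attributes may change underfoot, so each
        // region is treated like any other and claimed individually.
        if (claimer.is_claimed(index) ||
            (!concurrent && regions[index].continues_humongous)) {
          continue;
        }
        if (!claimer.claim(index)) {
          continue;  // another worker won the race
        }
        if (!concurrent && regions[index].starts_humongous) {
          // Visit the trailing "continues humongous" regions first; no other
          // worker races for them because they were skipped above.
          for (size_t j = index + 1;
               j < n && regions[j].continues_humongous; j++) {
            if (claimer.claim(j)) visit(regions[j]);
          }
        }
        visit(regions[index]);
      }
    }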
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Mon Nov 24 23:28:48 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Tue Nov 25 11:59:55 2014 +0100
@@ -222,7 +222,7 @@
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.