8197569: Refactor eager reclaim for concurrent remembered set rebuilding
author tschatzl
date Mon, 26 Mar 2018 16:51:41 +0200
changeset 49604 ca5978b8378b
parent 49603 6ce4db4460ca
child 49605 784f3f2dea14
8197569: Refactor eager reclaim for concurrent remembered set rebuilding
Summary: Expose information about eager reclaim region selection.
Reviewed-by: sjohanss, sangheki
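For orientation, the candidate test that this change hoists out of the registration closure and into G1CollectedHeap reduces to the following decision over a region's remembered set occupancy. This is a standalone simplification for illustration only: the flag names in the comments come from the diff below, while the free function and its plain size_t/bool parameters are hypothetical stand-ins for the real HeapRegionRemSet queries.

    #include <cstddef>

    // Simplified stand-in for the HeapRegionRemSet queries used by
    // G1CollectedHeap::is_potential_eager_reclaim_candidate();
    // "occupied" models the number of remembered set entries of the region.
    static bool is_potential_eager_reclaim_candidate_sketch(size_t occupied,
                                                            size_t sparse_region_entries,  // G1RSetSparseRegionEntries
                                                            bool reclaim_with_stale_refs,  // G1EagerReclaimHumongousObjectsWithStaleRefs
                                                            bool reclaim_humongous) {      // G1EagerReclaimHumongousObjects
      // Regions with many remembered set entries are not nominated, on the
      // assumption that the objects in them are likely still live.
      if (reclaim_with_stale_refs) {
        return occupied <= sparse_region_entries;  // tolerate a few (possibly stale) references
      }
      return reclaim_humongous && occupied == 0;   // otherwise require an empty remembered set
    }
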
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Mar 26 10:19:31 2018 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Mar 26 16:51:41 2018 +0200
@@ -2583,6 +2583,16 @@
   return buffer_size * buffer_num + extra_cards;
 }
 
+bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
+  // We don't nominate objects with many remembered set entries, on
+  // the assumption that such objects are likely still live.
+  HeapRegionRemSet* rem_set = r->rem_set();
+
+  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
+         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
+         G1EagerReclaimHumongousObjects && rem_set->is_empty();
+}
+
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
@@ -2590,23 +2600,14 @@
 
   DirtyCardQueue _dcq;
 
-  // We don't nominate objects with many remembered set entries, on
-  // the assumption that such objects are likely still live.
-  bool is_remset_small(HeapRegion* region) const {
-    HeapRegionRemSet* const rset = region->rem_set();
-    return G1EagerReclaimHumongousObjectsWithStaleRefs
-      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
-      : rset->is_empty();
-  }
-
-  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+  bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
     assert(region->is_starts_humongous(), "Must start a humongous object");
 
     oop obj = oop(region->bottom());
 
     // Dead objects cannot be eager reclaim candidates. Due to class
     // unloading it is unsafe to query their classes so we return early.
-    if (heap->is_obj_dead(obj, region)) {
+    if (g1h->is_obj_dead(obj, region)) {
       return false;
     }
 
@@ -2646,7 +2647,8 @@
     // important use case for eager reclaim, and this special handling
     // may reduce needed headroom.
 
-    return obj->is_typeArray() && is_remset_small(region);
+    return obj->is_typeArray() &&
+           g1h->is_potential_eager_reclaim_candidate(region);
   }
 
  public:
@@ -4818,10 +4820,7 @@
                              obj->is_typeArray()
                             );
 
-    // Need to clear mark bit of the humongous object if already set.
-    if (next_bitmap->is_marked(r->bottom())) {
-      next_bitmap->clear(r->bottom());
-    }
+    g1h->concurrent_mark()->humongous_object_eagerly_reclaimed(r);
     _humongous_objects_reclaimed++;
     do {
       HeapRegion* next = g1h->next_region_in_humongous(r);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Mar 26 10:19:31 2018 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Mar 26 16:51:41 2018 +0200
@@ -564,6 +564,9 @@
   void gc_prologue(bool full);
   void gc_epilogue(bool full);
 
+  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
+  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
+
   // Modify the reclaim candidate set and test for presence.
   // These are only valid for starts_humongous regions.
   inline void set_humongous_reclaim_candidate(uint region, bool value);
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Mar 26 10:19:31 2018 -0400
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Mar 26 16:51:41 2018 +0200
@@ -515,6 +515,14 @@
   set_concurrent_marking_in_progress();
 }
 
+void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+  assert(SafepointSynchronize::is_at_safepoint(), "May only be called at a safepoint.");
+
+  // Need to clear mark bit of the humongous object if already set and during a marking cycle.
+  if (_next_mark_bitmap->is_marked(r->bottom())) {
+    _next_mark_bitmap->clear(r->bottom());
+  }
+}
 
 void G1ConcurrentMark::reset_marking_state() {
   _global_mark_stack.set_empty();
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Mon Mar 26 10:19:31 2018 -0400
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Mon Mar 26 16:51:41 2018 +0200
@@ -447,6 +447,8 @@
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
+  // Notification for eagerly reclaimed regions to clean up.
+  void humongous_object_eagerly_reclaimed(HeapRegion* r);
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
   // between task-local queues and the global mark stack.
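The point of exposing is_potential_eager_reclaim_candidate() on G1CollectedHeap is that region selection code outside the registration closure (for example, the concurrent remembered set rebuilding work this change prepares for) can apply the same remembered-set criterion. The following standalone sketch only illustrates that calling pattern; the HeapRegion/heap types, their field names, and the plain loop standing in for a HeapRegionClosure are hypothetical, and the dead-object and class-unloading checks from the real closure are omitted.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical, minimal stand-ins for HotSpot types; only the predicate
    // name and its role mirror the patch above.
    struct HeapRegion {
      bool   starts_humongous;
      bool   is_type_array;      // stand-in for oop(r->bottom())->is_typeArray()
      size_t rem_set_occupied;   // stand-in for HeapRegionRemSet occupancy
    };

    struct G1CollectedHeapSketch {
      size_t sparse_region_entries = 4;     // stand-in for G1RSetSparseRegionEntries
      bool   reclaim_with_stale_refs = true;

      // Mirrors the shape of G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion*),
      // omitting the G1EagerReclaimHumongousObjects fallback for brevity.
      bool is_potential_eager_reclaim_candidate(const HeapRegion& r) const {
        return reclaim_with_stale_refs ? r.rem_set_occupied <= sparse_region_entries
                                       : r.rem_set_occupied == 0;
      }
    };

    // Any selection phase (here a plain loop instead of a HeapRegionClosure) can
    // now ask the heap for the remembered-set part of the candidate test instead
    // of duplicating it.
    int main() {
      G1CollectedHeapSketch g1h;
      std::vector<HeapRegion> regions = {
        {true, true, 2}, {true, true, 100}, {true, false, 0}, {false, true, 0}
      };
      for (size_t i = 0; i < regions.size(); ++i) {
        const HeapRegion& r = regions[i];
        bool candidate = r.starts_humongous && r.is_type_array &&
                         g1h.is_potential_eager_reclaim_candidate(r);
        std::cout << "region " << i << (candidate ? ": candidate\n" : ": not a candidate\n");
      }
      return 0;
    }

Centralizing the test on the heap keeps the "few or no remembered set entries" policy in one place, so the registration closure and any later selection phase cannot drift apart.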