8057658: Enable G1 FullGC extensions
author sjohanss
Tue, 09 Sep 2014 00:05:25 +0200
changeset 26839 021bfc544c6f
parent 26838 344fb68e970a
child 26840 f3c7613a7bbb
8057658: Enable G1 FullGC extensions
Summary: Refactored the G1 FullGC code to enable it to be extended.
Reviewed-by: mgerdin, brutisso
hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
hotspot/src/share/vm/memory/space.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Tue Sep 09 00:05:25 2014 +0200
@@ -86,6 +86,12 @@
    void set_used(size_t bytes) {
      _summary_bytes_used = bytes;
    }
+
+   virtual HeapRegion* new_heap_region(uint hrs_index,
+                                       G1BlockOffsetSharedArray* sharedOffsetArray,
+                                       MemRegion mr) {
+     return new HeapRegion(hrs_index, sharedOffsetArray, mr);
+   }
 };
 
 // The default allocator for G1.
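
Note: the virtual new_heap_region() added above is the allocator-side extension point; HeapRegionManager now routes region construction through the allocator (see the heapRegionManager.cpp hunk below), so a subclassed allocator can hand out a HeapRegion subtype. The following is a minimal sketch of such an extension, not part of this changeset: ExtHeapRegion and ExtG1Allocator are made-up names, and the G1Allocator base constructor taking a G1CollectedHeap* is an assumption about the surrounding sources.

// Hypothetical sketch only: subclass the allocator so every committed
// region becomes an extension-specific HeapRegion subtype.
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/heapRegion.hpp"

class ExtHeapRegion : public HeapRegion {
 public:
  ExtHeapRegion(uint hrm_index,
                G1BlockOffsetSharedArray* sharedOffsetArray,
                MemRegion mr)
    : HeapRegion(hrm_index, sharedOffsetArray, mr) { }
  // Extension-specific per-region state would live here.
};

class ExtG1Allocator : public G1Allocator {
 public:
  // Assumed base-class constructor signature (not shown in this diff).
  ExtG1Allocator(G1CollectedHeap* heap) : G1Allocator(heap) { }

  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    // HeapRegionManager::new_heap_region() calls through the allocator,
    // so this override takes effect for the whole heap.
    return new ExtHeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};
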
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Sep 09 00:05:25 2014 +0200
@@ -621,6 +621,10 @@
 
 public:
 
+  G1Allocator* allocator() {
+    return _allocator;
+  }
+
   G1MonitoringSupport* g1mm() {
     assert(_g1mm != NULL, "should have been initialized");
     return _g1mm;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Sep 09 00:05:25 2014 +0200
@@ -193,76 +193,6 @@
   gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
 }
 
-class G1PrepareCompactClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mrbs;
-  CompactPoint _cp;
-  HeapRegionSetCount _humongous_regions_removed;
-
-  bool is_cp_initialized() const {
-    return _cp.space != NULL;
-  }
-
-  void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
-    // If this is the first live region that we came across which we can compact,
-    // initialize the CompactPoint.
-    if (!is_cp_initialized()) {
-      _cp.space = hr;
-      _cp.threshold = hr->initialize_threshold();
-    }
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
-  }
-
-  void free_humongous_region(HeapRegion* hr) {
-    HeapWord* end = hr->end();
-    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
-    assert(hr->startsHumongous(),
-           "Only the start of a humongous region should be freed.");
-
-    hr->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, hr->capacity());
-
-    _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    prepare_for_compaction(hr, end);
-    dummy_free_list.remove_all();
-  }
-
-public:
-  G1PrepareCompactClosure()
-  : _g1h(G1CollectedHeap::heap()),
-    _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL),
-    _humongous_regions_removed() { }
-
-  void update_sets() {
-    // We'll recalculate total used bytes and recreate the free list
-    // at the end of the GC, so no point in updating those values here.
-    HeapRegionSetCount empty_set;
-    _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
-  }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
-        oop obj = oop(hr->bottom());
-        if (obj->is_gc_marked()) {
-          obj->forward_to(obj);
-        } else  {
-          free_humongous_region(hr);
-        }
-      } else {
-        assert(hr->continuesHumongous(), "Invalid humongous.");
-      }
-    } else {
-      prepare_for_compaction(hr, hr->end());
-    }
-    return false;
-  }
-};
 
 void G1MarkSweep::mark_sweep_phase2() {
   // Now all live objects are marked, compute the new object addresses.
@@ -271,14 +201,10 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");
 
-  G1PrepareCompactClosure blk;
-  g1h->heap_region_iterate(&blk);
-  blk.update_sets();
+  prepare_compaction();
 }
 
 class G1AdjustPointersClosure: public HeapRegionClosure {
@@ -373,3 +299,68 @@
   g1h->heap_region_iterate(&blk);
 
 }
+
+void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  g1h->heap_region_iterate(blk);
+  blk->update_sets();
+}
+
+void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
+  HeapWord* end = hr->end();
+  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+  assert(hr->startsHumongous(),
+         "Only the start of a humongous region should be freed.");
+
+  hr->set_containing_set(NULL);
+  _humongous_regions_removed.increment(1u, hr->capacity());
+
+  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
+  prepare_for_compaction(hr, end);
+  dummy_free_list.remove_all();
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+  // If this is the first live region that we came across which we can compact,
+  // initialize the CompactPoint.
+  if (!is_cp_initialized()) {
+    _cp.space = hr;
+    _cp.threshold = hr->initialize_threshold();
+  }
+  prepare_for_compaction_work(&_cp, hr, end);
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
+                                                          HeapRegion* hr,
+                                                          HeapWord* end) {
+  hr->prepare_for_compaction(cp);
+  // Also clear the part of the card table that will be unused after
+  // compaction.
+  _mrbs->clear(MemRegion(hr->compaction_top(), end));
+}
+
+void G1PrepareCompactClosure::update_sets() {
+  // We'll recalculate total used bytes and recreate the free list
+  // at the end of the GC, so no point in updating those values here.
+  HeapRegionSetCount empty_set;
+  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+}
+
+bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
+  if (hr->isHumongous()) {
+    if (hr->startsHumongous()) {
+      oop obj = oop(hr->bottom());
+      if (obj->is_gc_marked()) {
+        obj->forward_to(obj);
+      } else  {
+        free_humongous_region(hr);
+      }
+    } else {
+      assert(hr->continuesHumongous(), "Invalid humongous.");
+    }
+  } else {
+    prepare_for_compaction(hr, hr->end());
+  }
+  return false;
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Tue Sep 09 00:05:25 2014 +0200
@@ -43,7 +43,7 @@
 // compaction.
 //
 // Class unloading will only occur when a full gc is invoked.
-
+class G1PrepareCompactClosure;
 
 class G1MarkSweep : AllStatic {
   friend class VM_G1MarkSweep;
@@ -70,6 +70,30 @@
   static void mark_sweep_phase4();
 
   static void allocate_stacks();
+  static void prepare_compaction();
+  static void prepare_compaction_work(G1PrepareCompactClosure* blk);
+};
+
+class G1PrepareCompactClosure : public HeapRegionClosure {
+ protected:
+  G1CollectedHeap* _g1h;
+  ModRefBarrierSet* _mrbs;
+  CompactPoint _cp;
+  HeapRegionSetCount _humongous_regions_removed;
+
+  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
+  void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
+  void free_humongous_region(HeapRegion* hr);
+  bool is_cp_initialized() const { return _cp.space != NULL; }
+
+ public:
+  G1PrepareCompactClosure() :
+    _g1h(G1CollectedHeap::heap()),
+    _mrbs(_g1h->g1_barrier_set()),
+    _humongous_regions_removed() { }
+
+  void update_sets();
+  bool doHeapRegion(HeapRegion* hr);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp	Tue Sep 09 00:05:25 2014 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MarkSweep.hpp"
+
+void G1MarkSweep::prepare_compaction() {
+  G1PrepareCompactClosure blk;
+  G1MarkSweep::prepare_compaction_work(&blk);
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Sep 09 00:05:25 2014 +0200
@@ -322,9 +322,10 @@
 
 HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
-                       MemRegion mr, AllocationContext_t context) :
+                       MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
-    _hrm_index(hrm_index), _allocation_context(context),
+    _hrm_index(hrm_index),
+    _allocation_context(AllocationContext::system()),
     _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL),
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Sep 09 00:05:25 2014 +0200
@@ -306,8 +306,7 @@
  public:
   HeapRegion(uint hrm_index,
              G1BlockOffsetSharedArray* sharedOffsetArray,
-             MemRegion mr,
-             AllocationContext_t context = AllocationContext::system());
+             MemRegion mr);
 
   // Initializing the HeapRegion not only resets the data structure, but also
   // resets the BOT for that heap region.
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Tue Sep 09 00:05:25 2014 +0200
@@ -66,10 +66,11 @@
 #endif
 
 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
-  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
   assert(reserved().contains(mr), "invariant");
-  return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
+  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
 }
 
 void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
--- a/hotspot/src/share/vm/memory/space.hpp	Fri Sep 05 12:36:37 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.hpp	Tue Sep 09 00:05:25 2014 +0200
@@ -331,11 +331,10 @@
   CompactibleSpace* space;
   HeapWord* threshold;
 
-  CompactPoint(Generation* _gen) :
-    gen(_gen), space(NULL), threshold(0) {}
+  CompactPoint(Generation* g = NULL) :
+    gen(g), space(NULL), threshold(0) {}
 };
 
-
 // A space that supports compaction operations.  This is usually, but not
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without