8225478: Make G1CMRootRegions independent of HeapRegions
Summary: Remove dependency on HeapRegion from G1CMRootRegions class
Reviewed-by: tschatzl, kbarrett
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Jun 11 17:20:51 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Wed Jun 12 10:34:29 2019 +0200
@@ -4640,7 +4640,7 @@
bool const during_im = collector_state()->in_initial_mark_gc();
if (during_im && allocated_bytes > 0) {
- _cm->root_regions()->add(alloc_region);
+ _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
}
_hr_printer.retire(alloc_region);
}
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Jun 11 17:20:51 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Wed Jun 12 10:34:29 2019 +0200
@@ -257,30 +257,38 @@
_free_list = NULL;
}
-G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
- _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
- _max_regions(max_regions),
- _num_root_regions(0),
- _claimed_root_regions(0),
- _scan_in_progress(false),
- _should_abort(false) { }
-
-G1CMRootRegions::~G1CMRootRegions() {
- FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
+G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
+ _root_regions(NULL),
+ _max_regions(max_regions),
+ _num_root_regions(0),
+ _claimed_root_regions(0),
+ _scan_in_progress(false),
+ _should_abort(false) {
+ _root_regions = new MemRegion[_max_regions];
+ if (_root_regions == NULL) {
+ vm_exit_during_initialization("Could not allocate root MemRegion set.");
+ }
}
-void G1CMRootRegions::reset() {
+G1CMRootMemRegions::~G1CMRootMemRegions() {
+ delete[] _root_regions;
+}
+
+void G1CMRootMemRegions::reset() {
_num_root_regions = 0;
}
-void G1CMRootRegions::add(HeapRegion* hr) {
+void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
- assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
- _root_regions[idx] = hr;
+ assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
+ assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
+ "end (" PTR_FORMAT ")", p2i(start), p2i(end));
+ _root_regions[idx].set_start(start);
+ _root_regions[idx].set_end(end);
}
-void G1CMRootRegions::prepare_for_scan() {
+void G1CMRootMemRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");
_scan_in_progress = _num_root_regions > 0;
@@ -289,7 +297,7 @@
_should_abort = false;
}
-HeapRegion* G1CMRootRegions::claim_next() {
+const MemRegion* G1CMRootMemRegions::claim_next() {
if (_should_abort) {
// If someone has set the should_abort flag, we return NULL to
// force the caller to bail out of their loop.
@@ -302,26 +310,26 @@
size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
if (claimed_index < _num_root_regions) {
- return _root_regions[claimed_index];
+ return &_root_regions[claimed_index];
}
return NULL;
}
-uint G1CMRootRegions::num_root_regions() const {
+uint G1CMRootMemRegions::num_root_regions() const {
return (uint)_num_root_regions;
}
-void G1CMRootRegions::notify_scan_done() {
+void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress = false;
RootRegionScan_lock->notify_all();
}
-void G1CMRootRegions::cancel_scan() {
+void G1CMRootMemRegions::cancel_scan() {
notify_scan_done();
}
-void G1CMRootRegions::scan_finished() {
+void G1CMRootMemRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");
if (!_should_abort) {
@@ -333,7 +341,7 @@
notify_scan_done();
}
-bool G1CMRootRegions::wait_until_scan_finished() {
+bool G1CMRootMemRegions::wait_until_scan_finished() {
if (!scan_in_progress()) {
return false;
}
@@ -875,14 +883,21 @@
return result;
}
-void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
- assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
- "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
+void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
+#ifdef ASSERT
+ HeapWord* last = region->last();
+ HeapRegion* hr = _g1h->heap_region_containing(last);
+ assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
+ "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
+ assert(hr->next_top_at_mark_start() == region->start(),
+ "MemRegion start should be equal to nTAMS");
+#endif
+
G1RootRegionScanClosure cl(_g1h, this, worker_id);
const uintx interval = PrefetchScanIntervalInBytes;
- HeapWord* curr = hr->next_top_at_mark_start();
- const HeapWord* end = hr->top();
+ HeapWord* curr = region->start();
+ const HeapWord* end = region->end();
while (curr < end) {
Prefetch::read(curr, interval);
oop obj = oop(curr);
@@ -902,11 +917,11 @@
assert(Thread::current()->is_ConcurrentGC_thread(),
"this should only be done by a conc GC thread");
- G1CMRootRegions* root_regions = _cm->root_regions();
- HeapRegion* hr = root_regions->claim_next();
- while (hr != NULL) {
- _cm->scan_root_region(hr, worker_id);
- hr = root_regions->claim_next();
+ G1CMRootMemRegions* root_regions = _cm->root_regions();
+ const MemRegion* region = root_regions->claim_next();
+ while (region != NULL) {
+ _cm->scan_root_region(region, worker_id);
+ region = root_regions->claim_next();
}
}
};
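
Note on the claiming scheme implemented above: add() and claim_next() never take a lock for the array itself. Producers reserve a slot by atomically bumping _num_root_regions at a safepoint, and scanner threads later claim slots by atomically bumping _claimed_root_regions. Below is a minimal standalone analogue of that pattern, for illustration only; it is not HotSpot code, the Range and RootRanges names are invented, and std::atomic stands in for HotSpot's Atomic::add.

#include <atomic>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for a memory range; the real code uses HotSpot's MemRegion.
struct Range { const char* _start; const char* _end; };

class RootRanges {
  Range* _ranges;                       // fixed-capacity backing array
  const std::size_t _max;
  std::atomic<std::size_t> _num{0};     // next free slot (filled by producers)
  std::atomic<std::size_t> _claimed{0}; // next unclaimed slot (taken by scanners)
public:
  explicit RootRanges(std::size_t max) : _ranges(new Range[max]), _max(max) {}
  ~RootRanges() { delete[] _ranges; }

  // Producer side: reserve a slot with an atomic increment, then fill it in.
  void add(const char* start, const char* end) {
    std::size_t idx = _num.fetch_add(1);
    if (idx < _max) {
      _ranges[idx] = Range{start, end};
    }
  }

  // Consumer side: claim the next unscanned slot, or NULL once all are taken.
  const Range* claim_next() {
    std::size_t idx = _claimed.fetch_add(1);
    return idx < _num.load() ? &_ranges[idx] : nullptr;
  }
};

int main() {
  RootRanges roots(4);
  roots.add("survivor_ntams", "survivor_top");
  roots.add("old_ntams", "old_top");
  while (const Range* r = roots.claim_next()) {
    std::printf("scan [%s, %s)\n", r->_start, r->_end);
  }
  return 0;
}

As in the real class, claim_next() returning NULL is what terminates each worker's drain loop.
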
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue Jun 11 17:20:51 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Wed Jun 12 10:34:29 2019 +0200
@@ -222,18 +222,20 @@
template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};
-// Root Regions are regions that contain objects from nTAMS to top. These are roots
-// for marking, i.e. their referenced objects must be kept alive to maintain the
+// Root MemRegions are memory areas that contain objects whose references are
+// roots with respect to marking. They must be scanned before marking to maintain the
// SATB invariant.
-// We could scan and mark them through during the initial-mark pause, but for
+// Typically they cover the area from nTAMS to top of each such region.
+// We could scan and mark through these objects during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
-// Root regions comprise of the complete contents of survivor regions, and any
-// objects copied into old gen during GC.
-class G1CMRootRegions {
- HeapRegion** _root_regions;
+// Root MemRegions consist of the contents of survivor regions at the end
+// of the GC, and any objects copied into the old gen during GC.
+class G1CMRootMemRegions {
+ // The set of root MemRegions.
+ MemRegion* _root_regions;
size_t const _max_regions;
volatile size_t _num_root_regions; // Actual number of root regions.
@@ -246,13 +248,13 @@
void notify_scan_done();
public:
- G1CMRootRegions(uint const max_regions);
- ~G1CMRootRegions();
+ G1CMRootMemRegions(uint const max_regions);
+ ~G1CMRootMemRegions();
// Reset the data structure to allow addition of new root regions.
void reset();
- void add(HeapRegion* hr);
+ void add(HeapWord* start, HeapWord* end);
// Reset the claiming / scanning of the root regions.
void prepare_for_scan();
@@ -264,9 +266,9 @@
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }
- // Claim the next root region to scan atomically, or return NULL if
+ // Claim the next root MemRegion to scan atomically, or return NULL if
// all have been claimed.
- HeapRegion* claim_next();
+ const MemRegion* claim_next();
// The number of root regions to scan.
uint num_root_regions() const;
@@ -310,7 +312,7 @@
MemRegion const _heap;
// Root region tracking and claiming
- G1CMRootRegions _root_regions;
+ G1CMRootMemRegions _root_regions;
// For grey objects
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
@@ -501,7 +503,7 @@
size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
bool mark_stack_empty() const { return _global_mark_stack.is_empty(); }
- G1CMRootRegions* root_regions() { return &_root_regions; }
+ G1CMRootMemRegions* root_regions() { return &_root_regions; }
void concurrent_cycle_start();
// Abandon current marking iteration due to a Full GC.
@@ -554,8 +556,8 @@
// them.
void scan_root_regions();
- // Scan a single root region from nTAMS to top and mark everything reachable from it.
- void scan_root_region(HeapRegion* hr, uint worker_id);
+ // Scan a single root MemRegion to mark everything reachable from it.
+ void scan_root_region(const MemRegion* region, uint worker_id);
// Do concurrent phase of marking, to a tentative transitive closure.
void mark_from_roots();
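
The class comment above boils down to the observation that a root area is fully described by two word boundaries, nTAMS and top, which is why the set can now store plain MemRegions instead of HeapRegion pointers. A small self-contained sketch of that idea; FakeRegion, RootRange and root_range_of are invented stand-ins, not HotSpot types.

#include <cstdio>

// FakeRegion is an invented model, not HotSpot's HeapRegion: it only keeps the
// three boundaries that matter for root scanning.
struct FakeRegion {
  char* _bottom;
  char* _ntams;  // "next top at mark start"
  char* _top;    // current allocation top
};

// The root area of a region is fully described by two words, so the root set
// can record a plain start/end pair instead of the region itself.
struct RootRange { char* _start; char* _end; };

RootRange root_range_of(const FakeRegion& r) {
  return RootRange{r._ntams, r._top};
}

int main() {
  char storage[64];
  FakeRegion region{storage, storage + 16, storage + 40};
  RootRange root = root_range_of(region);
  std::printf("root range covers %td bytes\n", root._end - root._start);
  return 0;
}
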
--- a/src/hotspot/share/memory/memRegion.hpp Tue Jun 11 17:20:51 2019 -0700
+++ b/src/hotspot/share/memory/memRegion.hpp Wed Jun 12 10:34:29 2019 +0200
@@ -32,13 +32,13 @@
// A very simple data structure representing a contiguous region
// of address space.
-// Note that MemRegions are passed by value, not by reference.
+// Note that MemRegions are typically passed by value, not by reference.
// The intent is that they remain very small and contain no
// objects. The copy constructor and destructor must be trivial,
// to support optimization for pass-by-value.
-// These should never be allocated in heap but we do
-// create MemRegions (in CardTableBarrierSet) in heap so operator
-// new and operator new [] added for this special case.
+// These should almost never be allocated on the heap, but we do
+// create MemRegions (in CardTable and G1CMRootMemRegions) on the heap so operator
+// new and operator new [] were added for these special cases.
class MemRegion {
friend class VMStructs;
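
The special case mentioned in the rewritten comment is exactly the G1CMRootMemRegions constructor earlier in this patch, which allocates its backing array with new MemRegion[_max_regions] and therefore needs MemRegion::operator new[]. The following is a hedged standalone illustration of the same pattern; WordRange is an invented type, and its allocator uses malloc, whereas the real MemRegion goes through HotSpot's C-heap allocator.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

// Invented value type, not the real MemRegion: trivially copyable and normally
// passed by value, but with operator new[] provided so that a container (like
// the root-region set above) can keep an array of them on the C heap.
struct WordRange {
  const char* _start;
  std::size_t _word_size;

  void* operator new[](std::size_t size) {
    void* p = std::malloc(size);
    if (p == nullptr) {
      throw std::bad_alloc();
    }
    return p;
  }
  void operator delete[](void* p) { std::free(p); }
};

int main() {
  WordRange* ranges = new WordRange[8];  // goes through the custom operator new[]
  ranges[0] = WordRange{"heap", 4};      // elements themselves are plain values
  std::printf("%s: %zu words\n", ranges[0]._start, ranges[0]._word_size);
  delete[] ranges;
  return 0;
}
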