src/hotspot/share/gc/g1/g1CollectionSet.cpp
branch datagramsocketimpl-branch
changeset 58678 9cf78a70fa4f
parent 54843 25c329958c70
child 58679 9c3209ff7550
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Oct 17 20:53:35 2019 +0100
@@ -61,20 +61,18 @@
   _collection_set_max_length(0),
   _num_optional_regions(0),
   _bytes_used_before(0),
-  _recorded_rs_lengths(0),
+  _recorded_rs_length(0),
   _inc_build_state(Inactive),
   _inc_part_start(0),
   _inc_bytes_used_before(0),
-  _inc_recorded_rs_lengths(0),
-  _inc_recorded_rs_lengths_diffs(0),
+  _inc_recorded_rs_length(0),
+  _inc_recorded_rs_length_diff(0),
   _inc_predicted_elapsed_time_ms(0.0),
-  _inc_predicted_elapsed_time_ms_diffs(0.0) {
+  _inc_predicted_elapsed_time_ms_diff(0.0) {
 }
 
 G1CollectionSet::~G1CollectionSet() {
-  if (_collection_set_regions != NULL) {
-    FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
-  }
+  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
   free_optional_regions();
   clear_candidates();
 }
@@ -108,8 +106,8 @@
   _candidates = NULL;
 }
 
-void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
-  _recorded_rs_lengths = rs_lengths;
+void G1CollectionSet::set_recorded_rs_length(size_t rs_length) {
+  _recorded_rs_length = rs_length;
 }
 
 // Add the heap region at the head of the non-incremental collection set
@@ -127,7 +125,7 @@
   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
 
   _bytes_used_before += hr->used();
-  _recorded_rs_lengths += hr->rem_set()->occupied();
+  _recorded_rs_length += hr->rem_set()->occupied();
   _old_region_length++;
 
   _g1h->old_set_remove(hr);
@@ -148,10 +146,10 @@
 
   _inc_bytes_used_before = 0;
 
-  _inc_recorded_rs_lengths = 0;
-  _inc_recorded_rs_lengths_diffs = 0;
+  _inc_recorded_rs_length = 0;
+  _inc_recorded_rs_length_diff = 0;
   _inc_predicted_elapsed_time_ms = 0.0;
-  _inc_predicted_elapsed_time_ms_diffs = 0.0;
+  _inc_predicted_elapsed_time_ms_diff = 0.0;
 
   update_incremental_marker();
 }
@@ -160,32 +158,32 @@
   assert(_inc_build_state == Active, "Precondition");
   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
 
-  // The two "main" fields, _inc_recorded_rs_lengths and
+  // The two "main" fields, _inc_recorded_rs_length and
   // _inc_predicted_elapsed_time_ms, are updated by the thread
   // that adds a new region to the CSet. Further updates by the
   // concurrent refinement thread that samples the young RSet lengths
-  // are accumulated in the *_diffs fields. Here we add the diffs to
+  // are accumulated in the *_diff fields. Here we add the diffs to
   // the "main" fields.
 
-  if (_inc_recorded_rs_lengths_diffs >= 0) {
-    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
+  if (_inc_recorded_rs_length_diff >= 0) {
+    _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
   } else {
     // This is defensive. The diff should in theory be always positive
     // as RSets can only grow between GCs. However, given that we
     // sample their size concurrently with other threads updating them
     // it's possible that we might get the wrong size back, which
     // could make the calculations somewhat inaccurate.
-    size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
-    if (_inc_recorded_rs_lengths >= diffs) {
-      _inc_recorded_rs_lengths -= diffs;
+    size_t diffs = (size_t) (-_inc_recorded_rs_length_diff);
+    if (_inc_recorded_rs_length >= diffs) {
+      _inc_recorded_rs_length -= diffs;
     } else {
-      _inc_recorded_rs_lengths = 0;
+      _inc_recorded_rs_length = 0;
     }
   }
-  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;
+  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diff;
 
-  _inc_recorded_rs_lengths_diffs = 0;
-  _inc_predicted_elapsed_time_ms_diffs = 0.0;
+  _inc_recorded_rs_length_diff = 0;
+  _inc_predicted_elapsed_time_ms_diff = 0.0;
 }
 
 void G1CollectionSet::clear() {
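
The hunk above renames the accumulator fields from *_diffs to *_diff while keeping the fold-in logic of finalize_incremental_building(): the concurrent refinement thread records signed deltas in a side field, and the GC folds them into the "main" field at a safepoint, clamping at zero because a concurrent sample can be stale. A minimal standalone sketch of that pattern follows (hypothetical RSLengthTracker type, not JDK code):

// Hypothetical sketch of the diff-accumulation pattern, not JDK code.
#include <cstddef>     // size_t
#include <sys/types.h> // ssize_t (POSIX)

struct RSLengthTracker {
  size_t  recorded_length; // "main" field, read and updated by the GC at safepoints
  ssize_t length_diff;     // side field, bumped by the concurrent sampling thread

  // Called once at the start of a GC pause, at a safepoint.
  void fold_in_diff() {
    if (length_diff >= 0) {
      recorded_length += (size_t)length_diff;
    } else {
      // Defensive clamp: a stale concurrent sample can yield a negative
      // delta even though remembered sets only grow between GCs.
      size_t dec = (size_t)(-length_diff);
      recorded_length = (recorded_length >= dec) ? recorded_length - dec : 0;
    }
    length_diff = 0;
  }
};
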
@@ -217,10 +215,13 @@
   }
 }
 
-void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
+                                                    HeapRegionClaimer* hr_claimer,
+                                                    uint worker_id,
+                                                    uint total_workers) const {
   assert_at_safepoint();
 
-  size_t len = _collection_set_cur_length - _inc_part_start;
+  size_t len = increment_length();
   if (len == 0) {
     return;
   }
@@ -229,9 +230,12 @@
   size_t cur_pos = start_pos;
 
   do {
-    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos + _inc_part_start]);
-    bool result = cl->do_heap_region(r);
-    guarantee(!result, "Must not cancel iteration");
+    uint region_idx = _collection_set_regions[cur_pos + _inc_part_start];
+    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
+      HeapRegion* r = _g1h->region_at(region_idx);
+      bool result = cl->do_heap_region(r);
+      guarantee(!result, "Must not cancel iteration");
+    }
 
     cur_pos++;
     if (cur_pos == len) {
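
The hunk above threads an optional HeapRegionClaimer through iterate_incremental_part_from() so several workers can walk the same increment while each region is handled exactly once. Below is a rough standalone sketch of that claim-and-wrap scheme; the RegionClaimer type is hypothetical (the JDK claimer claims by global region index via claim_region(), while this sketch claims by position in the increment to stay self-contained):

// Hypothetical sketch of claim-based parallel iteration, not JDK code.
#include <atomic>
#include <cstddef>
#include <memory>

struct RegionClaimer {
  std::unique_ptr<std::atomic<bool>[]> claimed;
  explicit RegionClaimer(size_t n) : claimed(new std::atomic<bool>[n]) {
    for (size_t i = 0; i < n; i++) {
      claimed[i].store(false, std::memory_order_relaxed);
    }
  }
  // Returns true for exactly one caller per index.
  bool claim(size_t idx) {
    bool expected = false;
    return claimed[idx].compare_exchange_strong(expected, true);
  }
};

template <typename ProcessFn>
void iterate_part(size_t len, unsigned worker_id, unsigned num_workers,
                  RegionClaimer* claimer, ProcessFn process) {
  if (len == 0) {
    return;
  }
  // Spread the workers' starting positions across the range, then wrap around.
  size_t start_pos = (size_t)worker_id * len / num_workers;
  size_t cur_pos = start_pos;
  do {
    if (claimer == nullptr || claimer->claim(cur_pos)) {
      process(cur_pos);
    }
    if (++cur_pos == len) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}
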
@@ -246,23 +250,23 @@
   assert(hr->is_young(), "Precondition");
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
 
-  // We could have updated _inc_recorded_rs_lengths and
+  // We could have updated _inc_recorded_rs_length and
   // _inc_predicted_elapsed_time_ms directly but we'd need to do
   // that atomically, as this code is executed by a concurrent
   // refinement thread, potentially concurrently with a mutator thread
   // allocating a new region and also updating the same fields. To
   // avoid the atomic operations we accumulate these updates on two
-  // separate fields (*_diffs) and we'll just add them to the "main"
+  // separate fields (*_diff) and we'll just add them to the "main"
   // fields at the start of a GC.
 
   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
-  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
-  _inc_recorded_rs_lengths_diffs += rs_lengths_diff;
+  ssize_t rs_length_diff = (ssize_t) new_rs_length - old_rs_length;
+  _inc_recorded_rs_length_diff += rs_length_diff;
 
   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
-  _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
+  _inc_predicted_elapsed_time_ms_diff += elapsed_ms_diff;
 
   hr->set_recorded_rs_length(new_rs_length);
   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
@@ -273,8 +277,10 @@
   assert(_inc_build_state == Active, "Precondition");
 
   size_t collection_set_length = _collection_set_cur_length;
-  assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
-  hr->set_young_index_in_cset((int)collection_set_length);
+  // We use UINT_MAX as "invalid" marker in verification.
+  assert(collection_set_length < (UINT_MAX - 1),
+         "Collection set is too large with " SIZE_FORMAT " entries", collection_set_length);
+  hr->set_young_index_in_cset((uint)collection_set_length + 1);
 
   _collection_set_regions[collection_set_length] = hr->hrm_index();
   // Concurrent readers must observe the store of the value in the array before an
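
The hunk above switches the young index from a signed, 0-based value (with -1 meaning unset) to an unsigned, 1-based one: position N in the collection set gets young index N + 1, 0 now means "not in the young collection set", and UINT_MAX stays free as the "unused slot" marker used by the verification closure further down. A tiny sketch of the convention (hypothetical names, not JDK code):

// Hypothetical sketch of the 1-based young-index convention, not JDK code.
#include <climits>
#include <cstddef>

const unsigned int NotInYoungCSet   = 0;        // replaces the old -1 sentinel
const unsigned int UnusedVerifySlot = UINT_MAX; // reserved by the verification code

// Position 0 in the collection set maps to young index 1, and so on,
// which is why the verification closure allocates young_length + 1 slots.
inline unsigned int young_index_for_position(size_t pos_in_cset) {
  return (unsigned int)pos_in_cset + 1;
}
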
@@ -310,7 +316,7 @@
     hr->set_recorded_rs_length(rs_length);
     hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
 
-    _inc_recorded_rs_lengths += rs_length;
+    _inc_recorded_rs_length += rs_length;
     _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
     _inc_bytes_used_before += hr->used();
   }
@@ -403,7 +409,7 @@
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
 
-  size_t pending_cards = _policy->pending_cards();
+  size_t pending_cards = _policy->pending_cards_at_gc_start();
   double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
 
@@ -431,7 +437,7 @@
 
   // The number of recorded young regions is the incremental
   // collection set's current size
-  set_recorded_rs_lengths(_inc_recorded_rs_lengths);
+  set_recorded_rs_length(_inc_recorded_rs_length);
 
   double young_end_time_sec = os::elapsedTime();
   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
@@ -519,6 +525,9 @@
   _num_optional_regions -= num_selected_regions;
 
   stop_incremental_building();
+
+  _g1h->verify_region_attr_remset_update();
+
   return num_selected_regions > 0;
 }
 
@@ -526,22 +535,27 @@
   for (uint i = 0; i < _num_optional_regions; i++) {
     HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
     pss->record_unused_optional_region(r);
+    // Clear collection set marker and make sure that the remembered set information
+    // is correct as we still need it later.
     _g1h->clear_region_attr(r);
+    _g1h->register_region_with_region_attr(r);
     r->clear_index_in_opt_cset();
   }
   free_optional_regions();
+
+  _g1h->verify_region_attr_remset_update();
 }
 
 #ifdef ASSERT
 class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
 private:
   size_t _young_length;
-  int* _heap_region_indices;
+  uint* _heap_region_indices;
 public:
   G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
-    _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
-    for (size_t i = 0; i < young_length; i++) {
-      _heap_region_indices[i] = -1;
+    _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
+    for (size_t i = 0; i < young_length + 1; i++) {
+      _heap_region_indices[i] = UINT_MAX;
     }
   }
   ~G1VerifyYoungCSetIndicesClosure() {
@@ -549,12 +563,12 @@
   }
 
   virtual bool do_heap_region(HeapRegion* r) {
-    const int idx = r->young_index_in_cset();
+    const uint idx = r->young_index_in_cset();
 
-    assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
-    assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());
+    assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
+    assert(idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());
 
-    assert(_heap_region_indices[idx] == -1,
+    assert(_heap_region_indices[idx] == UINT_MAX,
            "Index %d used by multiple regions, first use by region %u, second by region %u",
            idx, _heap_region_indices[idx], r->hrm_index());