7119027: G1: use atomics to update RS length / predicted time of inc CSet
Summary: Make sure that the updates to the RS length and inc CSet predicted time are done in an MT-safe way.
Reviewed-by: brutisso, iveresov
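
The change avoids atomic updates on the refinement path: the sampling thread accumulates its RSet length / predicted time adjustments in separate *_diffs fields, and those diffs are folded into the main fields at the start of a GC (at a safepoint). A minimal standalone sketch of this pattern, using illustrative names rather than the actual HotSpot fields, might look like:

    // Illustrative sketch of the *_diffs accumulation pattern (not the HotSpot code).
    #include <cstddef>
    #include <cstdio>

    struct IncCSetStats {
      std::size_t    recorded_rs_lengths;        // written only by the thread adding CSet regions
      std::ptrdiff_t recorded_rs_lengths_diffs;  // written only by the sampling thread

      // Sampling thread: record how much a region's RSet length changed
      // since it was last recorded, without touching the main field.
      void record_sample(std::size_t old_rs_length, std::size_t new_rs_length) {
        recorded_rs_lengths_diffs +=
            (std::ptrdiff_t) new_rs_length - (std::ptrdiff_t) old_rs_length;
      }

      // At the start of a GC (a safepoint, so no concurrent writers):
      // fold the accumulated diffs into the main field and reset them.
      void finalize_at_safepoint() {
        if (recorded_rs_lengths_diffs >= 0) {
          recorded_rs_lengths += (std::size_t) recorded_rs_lengths_diffs;
        } else {
          // Defensive: a concurrent sample can under-estimate a length,
          // so clamp rather than underflow.
          std::size_t dec = (std::size_t) -recorded_rs_lengths_diffs;
          recorded_rs_lengths = (recorded_rs_lengths >= dec)
                                  ? recorded_rs_lengths - dec : 0;
        }
        recorded_rs_lengths_diffs = 0;
      }
    };

    int main() {
      IncCSetStats stats = {100, 0};
      stats.record_sample(10, 25);      // a region's RSet grew from 10 to 25
      stats.finalize_at_safepoint();    // at the safepoint: 100 + 15 = 115
      std::printf("%zu\n", stats.recorded_rs_lengths);
      return 0;
    }
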
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Dec 20 20:29:35 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Dec 21 07:53:53 2011 -0500
@@ -230,7 +230,9 @@
_inc_cset_bytes_used_before(0),
_inc_cset_max_finger(NULL),
_inc_cset_recorded_rs_lengths(0),
+ _inc_cset_recorded_rs_lengths_diffs(0),
_inc_cset_predicted_elapsed_time_ms(0.0),
+ _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
@@ -1551,10 +1553,19 @@
}
}
- // It turns out that, sometimes, _max_rs_lengths can get smaller
- // than _recorded_rs_lengths which causes rs_length_diff to get
- // very large and mess up the RSet length predictions. We'll be
- // defensive until we work out why this happens.
+ // This is defensive. For a while _max_rs_lengths could get
+ // smaller than _recorded_rs_lengths which was causing
+ // rs_length_diff to get very large and mess up the RSet length
+ // predictions. The reason was unsafe concurrent updates to the
+ // _inc_cset_recorded_rs_lengths field which the code below guards
+ // against (see CR 7118202). This bug has now been fixed (see CR
+ // 7119027). However, I'm still worried that
+ // _inc_cset_recorded_rs_lengths might still end up somewhat
+ // inaccurate. The concurrent refinement thread calculates an
+ // RSet's length concurrently with other CR threads updating it
+ // which might cause it to calculate the length incorrectly (if,
+ // say, it's in mid-coarsening). So I'll leave in the defensive
+ // conditional below just in case.
size_t rs_length_diff = 0;
if (_max_rs_lengths > _recorded_rs_lengths) {
rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
@@ -2436,10 +2447,45 @@
_inc_cset_max_finger = 0;
_inc_cset_recorded_rs_lengths = 0;
- _inc_cset_predicted_elapsed_time_ms = 0;
+ _inc_cset_recorded_rs_lengths_diffs = 0;
+ _inc_cset_predicted_elapsed_time_ms = 0.0;
+ _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
_inc_cset_build_state = Active;
}
+void G1CollectorPolicy::finalize_incremental_cset_building() {
+ assert(_inc_cset_build_state == Active, "Precondition");
+ assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
+
+ // The two "main" fields, _inc_cset_recorded_rs_lengths and
+ // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
+ // that adds a new region to the CSet. Further updates by the
+ // concurrent refinement thread that samples the young RSet lengths
+ // are accumulated in the *_diffs fields. Here we add the diffs to
+ // the "main" fields.
+
+ if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
+ _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
+ } else {
+ // This is defensive. The diff should in theory be always positive
+ // as RSets can only grow between GCs. However, given that we
+ // sample their size concurrently with other threads updating them
+ // it's possible that we might get the wrong size back, which
+ // could make the calculations somewhat inaccurate.
+ size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
+ if (_inc_cset_recorded_rs_lengths >= diffs) {
+ _inc_cset_recorded_rs_lengths -= diffs;
+ } else {
+ _inc_cset_recorded_rs_lengths = 0;
+ }
+ }
+ _inc_cset_predicted_elapsed_time_ms +=
+ _inc_cset_predicted_elapsed_time_ms_diffs;
+
+ _inc_cset_recorded_rs_lengths_diffs = 0;
+ _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
+}
+
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
@@ -2455,10 +2501,8 @@
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
size_t used_bytes = hr->used();
-
_inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
-
_inc_cset_bytes_used_before += used_bytes;
// Cache the values we have added to the aggregated information
@@ -2469,37 +2513,33 @@
hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}
-void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
- // This routine is currently only called as part of the updating of
- // existing policy information for regions in the incremental cset that
- // is performed by the concurrent refine thread(s) as part of young list
- // RSet sampling. Therefore we should not be at a safepoint.
-
- assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
- assert(hr->is_young(), "it should be");
-
- size_t used_bytes = hr->used();
- size_t old_rs_length = hr->recorded_rs_length();
+void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
+ size_t new_rs_length) {
+ // Update the CSet information that is dependent on the new RS length
+ assert(hr->is_young(), "Precondition");
+ assert(!SafepointSynchronize::is_at_safepoint(),
+ "should not be at a safepoint");
+
+ // We could have updated _inc_cset_recorded_rs_lengths and
+ // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
+ // that atomically, as this code is executed by a concurrent
+ // refinement thread, potentially concurrently with a mutator thread
+ // allocating a new region and also updating the same fields. To
+ // avoid the atomic operations we accumulate these updates on two
+ // separate fields (*_diffs) and we'll just add them to the "main"
+ // fields at the start of a GC.
+
+ ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
+ ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
+ _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
+
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-
- // Subtract the old recorded/predicted policy information for
- // the given heap region from the collection set info.
- _inc_cset_recorded_rs_lengths -= old_rs_length;
- _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
-
- _inc_cset_bytes_used_before -= used_bytes;
-
- // Clear the values cached in the heap region
- hr->set_recorded_rs_length(0);
- hr->set_predicted_elapsed_time_ms(0);
-}
-
-void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
- // Update the collection set information that is dependent on the new RS length
- assert(hr->is_young(), "Precondition");
-
- remove_from_incremental_cset_info(hr);
- add_to_incremental_cset_info(hr, new_rs_length);
+ double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+ double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
+ _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
+
+ hr->set_recorded_rs_length(new_rs_length);
+ hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
@@ -2591,6 +2631,7 @@
double non_young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list();
+ finalize_incremental_cset_building();
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Dec 20 20:29:35 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Dec 21 07:53:53 2011 -0500
@@ -588,16 +588,29 @@
// Used to record the highest end of heap region in collection set
HeapWord* _inc_cset_max_finger;
- // The RSet lengths recorded for regions in the collection set
- // (updated by the periodic sampling of the regions in the
- // young list/collection set).
+ // The RSet lengths recorded for regions in the CSet. It is updated
+ // by the thread that adds a new region to the CSet. We assume that
+ // only one thread can be allocating a new CSet region (currently,
+ // it does so after taking the Heap_lock) hence no need to
+ // synchronize updates to this field.
size_t _inc_cset_recorded_rs_lengths;
- // The predicted elapsed time it will take to collect the regions
- // in the collection set (updated by the periodic sampling of the
- // regions in the young list/collection set).
+ // A concurrent refinement thread periodically samples the young
+ // region RSets and needs to update _inc_cset_recorded_rs_lengths as
+ // the RSets grow. Instead of having to synchronize updates to that
+ // field we accumulate them in this field and add it to
+ // _inc_cset_recorded_rs_lengths at the start of a GC.
+ ssize_t _inc_cset_recorded_rs_lengths_diffs;
+
+ // The predicted elapsed time it will take to collect the regions in
+ // the CSet. This is updated by the thread that adds a new region to
+ // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
+ // MT-safety assumptions.
double _inc_cset_predicted_elapsed_time_ms;
+ // See the comment for _inc_cset_recorded_rs_lengths_diffs.
+ double _inc_cset_predicted_elapsed_time_ms_diffs;
+
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
@@ -894,6 +907,10 @@
// Initialize incremental collection set info.
void start_incremental_cset_building();
+ // Perform any final calculations on the incremental CSet fields
+ // before we can use them.
+ void finalize_incremental_cset_building();
+
void clear_incremental_cset() {
_inc_cset_head = NULL;
_inc_cset_tail = NULL;
@@ -902,10 +919,9 @@
// Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
- // Add/remove information about hr to the aggregated information
- // for the incrementally built collection set.
+ // Add information about hr to the aggregated information for the
+ // incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
- void remove_from_incremental_cset_info(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.