8228503: Rename "rs_lengths" to "rs_length" in ergo code
Reviewed-by: pliden, kbarrett
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -88,7 +88,7 @@
_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
- _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
+ _rs_length_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
_recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
_recent_avg_pause_time_ratio(0.0),
@@ -210,8 +210,8 @@
_pending_cards_seq->add(pending_cards);
}
-void G1Analytics::report_rs_lengths(double rs_lengths) {
- _rs_lengths_seq->add(rs_lengths);
+void G1Analytics::report_rs_length(double rs_length) {
+ _rs_length_seq->add(rs_length);
}
size_t G1Analytics::predict_rs_length_diff() const {
@@ -310,8 +310,8 @@
return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}
-size_t G1Analytics::predict_rs_lengths() const {
- return get_new_size_prediction(_rs_lengths_seq);
+size_t G1Analytics::predict_rs_length() const {
+ return get_new_size_prediction(_rs_length_seq);
}
size_t G1Analytics::predict_pending_cards() const {
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp Wed Aug 07 10:29:07 2019 +0200
@@ -58,7 +58,7 @@
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
- TruncatedSeq* _rs_lengths_seq;
+ TruncatedSeq* _rs_length_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
@@ -109,7 +109,7 @@
void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
void report_constant_other_time_ms(double constant_other_time_ms);
void report_pending_cards(double pending_cards);
- void report_rs_lengths(double rs_lengths);
+ void report_rs_length(double rs_length);
size_t predict_rs_length_diff() const;
@@ -146,7 +146,7 @@
double predict_cleanup_time_ms() const;
- size_t predict_rs_lengths() const;
+ size_t predict_rs_length() const;
size_t predict_pending_cards() const;
double predict_cost_per_byte_ms() const;
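
(Not part of the patch: a minimal sketch of what the renamed report_rs_length()/predict_rs_length() pair amounts to. The class name, the plain-average prediction, and the window size are simplified assumptions for illustration; the real code stores samples in a TruncatedSeq of TruncatedSeqLength entries and derives the prediction through G1Predictions.)

#include <cstddef>
#include <deque>
#include <numeric>

class RsLengthAnalytics {                       // hypothetical stand-in for G1Analytics
  static constexpr size_t TruncatedSeqLength = 10;  // assumed window size
  std::deque<double> _rs_length_seq;            // keeps only the most recent samples

public:
  // Called after a young-only pause with the observed total rem set length.
  void report_rs_length(double rs_length) {
    _rs_length_seq.push_back(rs_length);
    if (_rs_length_seq.size() > TruncatedSeqLength) {
      _rs_length_seq.pop_front();
    }
  }

  // Used when sizing the next young generation.
  size_t predict_rs_length() const {
    if (_rs_length_seq.empty()) return 0;
    double sum = std::accumulate(_rs_length_seq.begin(), _rs_length_seq.end(), 0.0);
    return (size_t)(sum / _rs_length_seq.size());  // real code is not a plain average
  }
};
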
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -4058,7 +4058,7 @@
G1SerialFreeCollectionSetClosure _cl;
const size_t* _surviving_young_words;
- size_t _rs_lengths;
+ size_t _rs_length;
volatile jint _serial_work_claim;
@@ -4090,7 +4090,7 @@
HeapRegion* r = g1h->region_at(region_idx);
assert(!g1h->is_on_master_free_list(r), "sanity");
- Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
+ Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
if (!is_young) {
g1h->_hot_card_cache->reset_card_counts(r);
@@ -4123,7 +4123,7 @@
_cl.complete_work();
G1Policy* policy = G1CollectedHeap::heap()->policy();
- policy->record_max_rs_lengths(_rs_lengths);
+ policy->record_max_rs_length(_rs_length);
policy->cset_regions_freed();
}
public:
@@ -4132,7 +4132,7 @@
_collection_set(collection_set),
_cl(evacuation_info, surviving_young_words),
_surviving_young_words(surviving_young_words),
- _rs_lengths(0),
+ _rs_length(0),
_serial_work_claim(0),
_parallel_work_claim(0),
_num_work_items(collection_set->region_length()),
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -61,14 +61,14 @@
_collection_set_max_length(0),
_num_optional_regions(0),
_bytes_used_before(0),
- _recorded_rs_lengths(0),
+ _recorded_rs_length(0),
_inc_build_state(Inactive),
_inc_part_start(0),
_inc_bytes_used_before(0),
- _inc_recorded_rs_lengths(0),
- _inc_recorded_rs_lengths_diffs(0),
+ _inc_recorded_rs_length(0),
+ _inc_recorded_rs_length_diff(0),
_inc_predicted_elapsed_time_ms(0.0),
- _inc_predicted_elapsed_time_ms_diffs(0.0) {
+ _inc_predicted_elapsed_time_ms_diff(0.0) {
}
G1CollectionSet::~G1CollectionSet() {
@@ -108,8 +108,8 @@
_candidates = NULL;
}
-void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
- _recorded_rs_lengths = rs_lengths;
+void G1CollectionSet::set_recorded_rs_length(size_t rs_length) {
+ _recorded_rs_length = rs_length;
}
// Add the heap region at the head of the non-incremental collection set
@@ -127,7 +127,7 @@
assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
_bytes_used_before += hr->used();
- _recorded_rs_lengths += hr->rem_set()->occupied();
+ _recorded_rs_length += hr->rem_set()->occupied();
_old_region_length++;
_g1h->old_set_remove(hr);
@@ -148,10 +148,10 @@
_inc_bytes_used_before = 0;
- _inc_recorded_rs_lengths = 0;
- _inc_recorded_rs_lengths_diffs = 0;
+ _inc_recorded_rs_length = 0;
+ _inc_recorded_rs_length_diff = 0;
_inc_predicted_elapsed_time_ms = 0.0;
- _inc_predicted_elapsed_time_ms_diffs = 0.0;
+ _inc_predicted_elapsed_time_ms_diff = 0.0;
update_incremental_marker();
}
@@ -160,32 +160,32 @@
assert(_inc_build_state == Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
- // The two "main" fields, _inc_recorded_rs_lengths and
+ // The two "main" fields, _inc_recorded_rs_length and
// _inc_predicted_elapsed_time_ms, are updated by the thread
// that adds a new region to the CSet. Further updates by the
// concurrent refinement thread that samples the young RSet lengths
- // are accumulated in the *_diffs fields. Here we add the diffs to
+ // are accumulated in the *_diff fields. Here we add the diffs to
// the "main" fields.
- if (_inc_recorded_rs_lengths_diffs >= 0) {
- _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
+ if (_inc_recorded_rs_length_diff >= 0) {
+ _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
} else {
// This is defensive. The diff should in theory be always positive
// as RSets can only grow between GCs. However, given that we
// sample their size concurrently with other threads updating them
// it's possible that we might get the wrong size back, which
// could make the calculations somewhat inaccurate.
- size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
- if (_inc_recorded_rs_lengths >= diffs) {
- _inc_recorded_rs_lengths -= diffs;
+ size_t diffs = (size_t) (-_inc_recorded_rs_length_diff);
+ if (_inc_recorded_rs_length >= diffs) {
+ _inc_recorded_rs_length -= diffs;
} else {
- _inc_recorded_rs_lengths = 0;
+ _inc_recorded_rs_length = 0;
}
}
- _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;
+ _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diff;
- _inc_recorded_rs_lengths_diffs = 0;
- _inc_predicted_elapsed_time_ms_diffs = 0.0;
+ _inc_recorded_rs_length_diff = 0;
+ _inc_predicted_elapsed_time_ms_diff = 0.0;
}
void G1CollectionSet::clear() {
@@ -252,23 +252,23 @@
assert(hr->is_young(), "Precondition");
assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
- // We could have updated _inc_recorded_rs_lengths and
+ // We could have updated _inc_recorded_rs_length and
// _inc_predicted_elapsed_time_ms directly but we'd need to do
// that atomically, as this code is executed by a concurrent
// refinement thread, potentially concurrently with a mutator thread
// allocating a new region and also updating the same fields. To
// avoid the atomic operations we accumulate these updates on two
- // separate fields (*_diffs) and we'll just add them to the "main"
+ // separate fields (*_diff) and we'll just add them to the "main"
// fields at the start of a GC.
ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
- ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
- _inc_recorded_rs_lengths_diffs += rs_lengths_diff;
+ ssize_t rs_length_diff = (ssize_t) new_rs_length - old_rs_length;
+ _inc_recorded_rs_length_diff += rs_length_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
- _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
+ _inc_predicted_elapsed_time_ms_diff += elapsed_ms_diff;
hr->set_recorded_rs_length(new_rs_length);
hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
@@ -316,7 +316,7 @@
hr->set_recorded_rs_length(rs_length);
hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
- _inc_recorded_rs_lengths += rs_length;
+ _inc_recorded_rs_length += rs_length;
_inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
_inc_bytes_used_before += hr->used();
}
@@ -437,7 +437,7 @@
// The number of recorded young regions is the incremental
// collection set's current size
- set_recorded_rs_lengths(_inc_recorded_rs_lengths);
+ set_recorded_rs_length(_inc_recorded_rs_length);
double young_end_time_sec = os::elapsedTime();
phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
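
(Not part of the patch: the comments in the hunks above describe how the concurrent sampling thread accumulates signed deltas in a *_diff field which is folded into the "main" field at GC start, clamped at zero because a stale concurrent sample can drive the delta negative. A standalone sketch of that pattern, with hypothetical names:)

#include <cstddef>

typedef long long rs_delta_t;  // stand-in for ssize_t

struct IncCollectionSetLengths {     // hypothetical, not G1CollectionSet
  size_t     recorded_rs_length = 0;       // "main" field, single writer
  rs_delta_t recorded_rs_length_diff = 0;  // written by the sampling thread only

  // Concurrent refinement/sampling thread: a region's rem set was re-measured.
  void on_sample(size_t old_rs_length, size_t new_rs_length) {
    recorded_rs_length_diff += (rs_delta_t)new_rs_length - (rs_delta_t)old_rs_length;
  }

  // GC start, at a safepoint: fold the accumulated delta into the main field.
  void fold_diffs() {
    if (recorded_rs_length_diff >= 0) {
      recorded_rs_length += (size_t)recorded_rs_length_diff;
    } else {
      // Defensive clamp: rem sets only grow between GCs, but a concurrent
      // sample taken mid-update can make the accumulated delta negative.
      size_t dec = (size_t)(-recorded_rs_length_diff);
      recorded_rs_length = (recorded_rs_length >= dec) ? recorded_rs_length - dec : 0;
    }
    recorded_rs_length_diff = 0;
  }
};
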
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp Wed Aug 07 10:29:07 2019 +0200
@@ -164,7 +164,7 @@
// The number of cards in the remembered set in the collection set. Set from
// the incrementally built collection set at the start of an evacuation
// pause, and updated as more regions are added to the collection set.
- size_t _recorded_rs_lengths;
+ size_t _recorded_rs_length;
enum CSetBuildType {
Active, // We are actively building the collection set
@@ -188,25 +188,25 @@
// only one thread can be allocating a new CSet region (currently,
// it does so after taking the Heap_lock) hence no need to
// synchronize updates to this field.
- size_t _inc_recorded_rs_lengths;
+ size_t _inc_recorded_rs_length;
// A concurrent refinement thread periodically samples the young
- // region RSets and needs to update _inc_recorded_rs_lengths as
+ // region RSets and needs to update _inc_recorded_rs_length as
// the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add it to
- // _inc_recorded_rs_lengths_diffs at the start of a GC.
- ssize_t _inc_recorded_rs_lengths_diffs;
+ // _inc_recorded_rs_length at the start of a GC.
+ ssize_t _inc_recorded_rs_length_diff;
// The predicted elapsed time it will take to collect the regions in
// the CSet. This is updated by the thread that adds a new region to
- // the CSet. See the comment for _inc_recorded_rs_lengths about
+ // the CSet. See the comment for _inc_recorded_rs_length about
// MT-safety assumptions.
double _inc_predicted_elapsed_time_ms;
- // See the comment for _inc_recorded_rs_lengths_diffs.
- double _inc_predicted_elapsed_time_ms_diffs;
+ // See the comment for _inc_recorded_rs_length_diff.
+ double _inc_predicted_elapsed_time_ms_diff;
- void set_recorded_rs_lengths(size_t rs_lengths);
+ void set_recorded_rs_length(size_t rs_length);
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times();
@@ -293,7 +293,7 @@
void iterate_optional(HeapRegionClosure* cl) const;
- size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
+ size_t recorded_rs_length() { return _recorded_rs_length; }
size_t bytes_used_before() const {
return _bytes_used_before;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -2587,7 +2587,7 @@
// and do_marking_step() is not being called serially.
bool do_stealing = do_termination && !is_serial;
- double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
+ double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diff_ms);
_time_target_ms = time_target_ms - diff_prediction_ms;
// set up the variables that are used in the work-based scheme to
@@ -2829,7 +2829,7 @@
// Keep statistics of how well we did with respect to hitting
// our target only if we actually timed out (if we aborted for
// other reasons, then the results might get skewed).
- _marking_step_diffs_ms.add(diff_ms);
+ _marking_step_diff_ms.add(diff_ms);
}
if (_cm->has_overflown()) {
@@ -2912,11 +2912,11 @@
_elapsed_time_ms(0.0),
_termination_time_ms(0.0),
_termination_start_time_ms(0.0),
- _marking_step_diffs_ms()
+ _marking_step_diff_ms()
{
guarantee(task_queue != NULL, "invariant");
- _marking_step_diffs_ms.add(0.5);
+ _marking_step_diff_ms.add(0.5);
}
// These are formatting macros that are used below to ensure
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Wed Aug 07 10:29:07 2019 +0200
@@ -700,7 +700,7 @@
// When this task got into the termination protocol
double _termination_start_time_ms;
- TruncatedSeq _marking_step_diffs_ms;
+ TruncatedSeq _marking_step_diff_ms;
// Updates the local fields after this task has claimed
// a new region to scan
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -68,8 +68,8 @@
_reserve_regions(0),
_young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
_free_regions_at_end_of_collection(0),
- _max_rs_lengths(0),
- _rs_lengths_prediction(0),
+ _max_rs_length(0),
+ _rs_length_prediction(0),
_pending_cards(0),
_bytes_allocated_in_old_since_last_gc(0),
_initial_mark_to_mixed(),
@@ -219,23 +219,23 @@
}
uint G1Policy::update_young_list_max_and_target_length() {
- return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
+ return update_young_list_max_and_target_length(_analytics->predict_rs_length());
}
-uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
- uint unbounded_target_length = update_young_list_target_length(rs_lengths);
+uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) {
+ uint unbounded_target_length = update_young_list_target_length(rs_length);
update_max_gc_locker_expansion();
return unbounded_target_length;
}
-uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
- YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
+uint G1Policy::update_young_list_target_length(size_t rs_length) {
+ YoungTargetLengths young_lengths = young_list_target_lengths(rs_length);
_young_list_target_length = young_lengths.first;
return young_lengths.second;
}
-G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
+G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const {
YoungTargetLengths result;
// Calculate the absolute and desired min bounds first.
@@ -256,7 +256,7 @@
if (use_adaptive_young_list_length()) {
if (collector_state()->in_young_only_phase()) {
young_list_target_length =
- calculate_young_list_target_length(rs_lengths,
+ calculate_young_list_target_length(rs_length,
base_min_length,
desired_min_length,
desired_max_length);
@@ -301,7 +301,7 @@
}
uint
-G1Policy::calculate_young_list_target_length(size_t rs_lengths,
+G1Policy::calculate_young_list_target_length(size_t rs_length,
uint base_min_length,
uint desired_min_length,
uint desired_max_length) const {
@@ -326,8 +326,8 @@
const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
const size_t pending_cards = _analytics->predict_pending_cards();
- const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
- const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, true /* for_young_gc */);
+ const size_t adj_rs_length = rs_length + _analytics->predict_rs_length_diff();
+ const size_t scanned_cards = _analytics->predict_card_num(adj_rs_length, true /* for_young_gc */);
const double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time;
@@ -414,25 +414,25 @@
return survivor_regions_evac_time;
}
-void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
+void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
- if (rs_lengths > _rs_lengths_prediction) {
+ if (rs_length > _rs_length_prediction) {
// add 10% to avoid having to recalculate often
- size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
- update_rs_lengths_prediction(rs_lengths_prediction);
+ size_t rs_length_prediction = rs_length * 1100 / 1000;
+ update_rs_length_prediction(rs_length_prediction);
- update_young_list_max_and_target_length(rs_lengths_prediction);
+ update_young_list_max_and_target_length(rs_length_prediction);
}
}
-void G1Policy::update_rs_lengths_prediction() {
- update_rs_lengths_prediction(_analytics->predict_rs_lengths());
+void G1Policy::update_rs_length_prediction() {
+ update_rs_length_prediction(_analytics->predict_rs_length());
}
-void G1Policy::update_rs_lengths_prediction(size_t prediction) {
+void G1Policy::update_rs_length_prediction(size_t prediction) {
if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
- _rs_lengths_prediction = prediction;
+ _rs_length_prediction = prediction;
}
}
@@ -471,7 +471,7 @@
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
update_young_list_max_and_target_length();
- update_rs_lengths_prediction();
+ update_rs_length_prediction();
_bytes_allocated_in_old_since_last_gc = 0;
@@ -692,29 +692,29 @@
_analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
}
- if (_max_rs_lengths > 0) {
+ if (_max_rs_length > 0) {
double cards_per_entry_ratio =
- (double) remset_cards_scanned / (double) _max_rs_lengths;
+ (double) remset_cards_scanned / (double) _max_rs_length;
_analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
}
- // This is defensive. For a while _max_rs_lengths could get
- // smaller than _recorded_rs_lengths which was causing
+ // This is defensive. For a while _max_rs_length could get
+ // smaller than _recorded_rs_length which was causing
// rs_length_diff to get very large and mess up the RSet length
// predictions. The reason was unsafe concurrent updates to the
- // _inc_cset_recorded_rs_lengths field which the code below guards
+ // _inc_cset_recorded_rs_length field which the code below guards
// against (see CR 7118202). This bug has now been fixed (see CR
// 7119027). However, I'm still worried that
- // _inc_cset_recorded_rs_lengths might still end up somewhat
+ // _inc_cset_recorded_rs_length might still end up somewhat
// inaccurate. The concurrent refinement thread calculates an
// RSet's length concurrently with other CR threads updating it
// which might cause it to calculate the length incorrectly (if,
// say, it's in mid-coarsening). So I'll leave in the defensive
// conditional below just in case.
size_t rs_length_diff = 0;
- size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
- if (_max_rs_lengths > recorded_rs_lengths) {
- rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
+ size_t recorded_rs_length = _collection_set->recorded_rs_length();
+ if (_max_rs_length > recorded_rs_length) {
+ rs_length_diff = _max_rs_length - recorded_rs_length;
}
_analytics->report_rs_length_diff((double) rs_length_diff);
@@ -745,7 +745,7 @@
// During mixed gc we do not use them for young gen sizing.
if (this_pause_was_young_only) {
_analytics->report_pending_cards((double) _pending_cards);
- _analytics->report_rs_lengths((double) _max_rs_lengths);
+ _analytics->report_rs_length((double) _max_rs_length);
}
}
@@ -757,7 +757,7 @@
_free_regions_at_end_of_collection = _g1h->num_free_regions();
- update_rs_lengths_prediction();
+ update_rs_length_prediction();
// Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
// that in this case we are not running in a "normal" operating mode.
@@ -889,7 +889,7 @@
}
double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
- size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
+ size_t rs_length = _analytics->predict_rs_length() + _analytics->predict_rs_length_diff();
size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase());
return predict_base_elapsed_time_ms(pending_cards, card_num);
}
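
(Not part of the patch: a small worked example of the 10% slack applied in revise_young_list_target_length_if_necessary() above. Only the "rs_length * 1100 / 1000" arithmetic comes from the patch; the sample values and the standalone main() are made up for illustration.)

#include <cstddef>
#include <cstdio>

int main() {
  size_t rs_length_prediction = 0;
  const size_t samples[] = {40000, 42000, 45000};  // hypothetical sampled rem set lengths

  for (size_t sampled : samples) {
    if (sampled > rs_length_prediction) {
      // Add 10% (integer arithmetic) so small growth does not force a
      // recalculation of the young list target length on every sample.
      rs_length_prediction = sampled * 1100 / 1000;
      std::printf("sample %zu: recompute target, new prediction %zu\n",
                  sampled, rs_length_prediction);
    } else {
      std::printf("sample %zu: within prediction %zu, no recompute\n",
                  sampled, rs_length_prediction);
    }
  }
  return 0;
}
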
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Wed Aug 07 10:29:07 2019 +0200
@@ -96,9 +96,9 @@
uint _free_regions_at_end_of_collection;
- size_t _max_rs_lengths;
+ size_t _max_rs_length;
- size_t _rs_lengths_prediction;
+ size_t _rs_length_prediction;
size_t _pending_cards;
@@ -132,8 +132,8 @@
hr->install_surv_rate_group(_survivor_surv_rate_group);
}
- void record_max_rs_lengths(size_t rs_lengths) {
- _max_rs_lengths = rs_lengths;
+ void record_max_rs_length(size_t rs_length) {
+ _max_rs_length = rs_length;
}
double predict_base_elapsed_time_ms(size_t pending_cards) const;
@@ -194,17 +194,17 @@
double _mark_cleanup_start_sec;
// Updates the internal young list maximum and target lengths. Returns the
- // unbounded young list target length.
+ // unbounded young list target length. If no rs_length parameter is passed,
+ // predict the RS length using the prediction model, otherwise use the
+ // given rs_length as the prediction.
uint update_young_list_max_and_target_length();
- uint update_young_list_max_and_target_length(size_t rs_lengths);
+ uint update_young_list_max_and_target_length(size_t rs_length);
// Update the young list target length either by setting it to the
// desired fixed value or by calculating it using G1's pause
- // prediction model. If no rs_lengths parameter is passed, predict
- // the RS lengths using the prediction model, otherwise use the
- // given rs_lengths as the prediction.
+ // prediction model.
// Returns the unbounded young list target length.
- uint update_young_list_target_length(size_t rs_lengths);
+ uint update_young_list_target_length(size_t rs_length);
// Calculate and return the minimum desired young list target
// length. This is the minimum desired young list length according
@@ -217,12 +217,12 @@
uint calculate_young_list_desired_max_length() const;
// Calculate and return the maximum young list target length that
- // can fit into the pause time goal. The parameters are: rs_lengths
- // represent the prediction of how large the young RSet lengths will
+ // can fit into the pause time goal. The parameters are: rs_length
+ // represents the prediction of how large the young RSet lengths will
// be, base_min_length is the already existing number of regions in
// the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs.
- uint calculate_young_list_target_length(size_t rs_lengths,
+ uint calculate_young_list_target_length(size_t rs_length,
uint base_min_length,
uint desired_min_length,
uint desired_max_length) const;
@@ -230,10 +230,10 @@
// Result of the bounded_young_list_target_length() method, containing both the
// bounded as well as the unbounded young list target lengths in this order.
typedef Pair<uint, uint, StackObj> YoungTargetLengths;
- YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
+ YoungTargetLengths young_list_target_lengths(size_t rs_length) const;
- void update_rs_lengths_prediction();
- void update_rs_lengths_prediction(size_t prediction);
+ void update_rs_length_prediction();
+ void update_rs_length_prediction(size_t prediction);
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
@@ -295,10 +295,10 @@
G1GCPhaseTimes* phase_times() const { return _phase_times; }
- // Check the current value of the young list RSet lengths and
+ // Check the current value of the young list RSet length and
// compare it against the last prediction. If the current value is
// higher, recalculate the young list target length prediction.
- void revise_young_list_target_length_if_necessary(size_t rs_lengths);
+ void revise_young_list_target_length_if_necessary(size_t rs_length);
// This should be called after the heap is resized.
void record_new_heap_size(uint new_number_of_regions);
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Wed Aug 07 10:29:07 2019 +0200
@@ -110,7 +110,7 @@
}
while (!should_terminate()) {
- sample_young_list_rs_lengths();
+ sample_young_list_rs_length();
if (os::supports_vtime()) {
_vtime_accum = (os::elapsedVTime() - vtime_start);
@@ -132,14 +132,14 @@
class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
SuspendibleThreadSetJoiner* _sts;
size_t _regions_visited;
- size_t _sampled_rs_lengths;
+ size_t _sampled_rs_length;
public:
G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
- HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
+ HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_length(0) { }
virtual bool do_heap_region(HeapRegion* r) {
size_t rs_length = r->rem_set()->occupied();
- _sampled_rs_lengths += rs_length;
+ _sampled_rs_length += rs_length;
// Update the collection set policy information for this region
G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
@@ -158,10 +158,10 @@
return false;
}
- size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
+ size_t sampled_rs_length() const { return _sampled_rs_length; }
};
-void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
+void G1YoungRemSetSamplingThread::sample_young_list_rs_length() {
SuspendibleThreadSetJoiner sts;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* policy = g1h->policy();
@@ -173,7 +173,7 @@
g1cs->iterate(&cl);
if (cl.is_complete()) {
- policy->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
+ policy->revise_young_list_target_length_if_necessary(cl.sampled_rs_length());
}
}
}
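
(Not part of the patch: the control flow of the sampling pass above, reduced to plain C++ with made-up types. The real closure runs under a SuspendibleThreadSetJoiner and also refreshes each region's prediction; the point shown here is that the summed rs_length only feeds back into the policy when the walk over the young regions completed.)

#include <cstddef>
#include <vector>

struct Region { size_t rem_set_occupied; };   // hypothetical stand-in for HeapRegion

struct Policy {                               // hypothetical stand-in for G1Policy
  size_t revised_with = 0;
  void revise_young_list_target_length_if_necessary(size_t rs_length) {
    revised_with = rs_length;                 // placeholder for the real recalculation
  }
};

void sample_young_list_rs_length(const std::vector<Region>& young_regions,
                                 bool (*should_yield)(),
                                 Policy& policy) {
  size_t sampled_rs_length = 0;
  bool complete = true;

  for (const Region& r : young_regions) {
    if (should_yield()) {                     // e.g. a safepoint was requested
      complete = false;                       // a partial sum would skew the prediction
      break;
    }
    sampled_rs_length += r.rem_set_occupied;  // per-region rem set sample
  }

  if (complete) {
    policy.revise_young_list_target_length_if_necessary(sampled_rs_length);
  }
}
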
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.hpp Tue Aug 06 15:03:48 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.hpp Wed Aug 07 10:29:07 2019 +0200
@@ -47,7 +47,7 @@
double _vtime_accum; // Accumulated virtual time.
- void sample_young_list_rs_lengths();
+ void sample_young_list_rs_length();
void run_service();
void check_for_periodic_gc();