src/hotspot/share/gc/g1/g1DefaultPolicy.cpp
changeset 49375 9453739cb5b0
parent 49374 17d4481280f1
child 49376 7cd503c499a0
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1DefaultPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "logging/logStream.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/pair.hpp"

G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  _ihop_control(create_ihop_control(&_predictor)),
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  _young_list_fixed_length(0),
  _short_lived_surv_rate_group(new SurvRateGroup()),
  _survivor_surv_rate_group(new SurvRateGroup()),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _rs_lengths_prediction(0),
  _bytes_allocated_in_old_since_last_gc(0),
  _initial_mark_to_mixed(),
  _collection_set(NULL),
  _g1(NULL),
  _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true),
  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }

G1DefaultPolicy::~G1DefaultPolicy() {
  delete _ihop_control;
}

G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }

void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1 = g1h;
  _collection_set = collection_set;

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  if (!adaptive_young_list_length()) {
    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
  }
  _young_gen_sizer.adjust_max_new_size(_g1->max_regions());

  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  _collection_set->start_incremental_building();
}

void G1DefaultPolicy::note_gc_start() {
  phase_times()->note_gc_start();
}

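// Helper used by calculate_young_list_target_length() below: given a fixed
// "base" pause time and the number of regions available for eden, it predicts
// whether a candidate eden length both fits into the pause time target
// (copy time plus per-region "young other" time) and leaves enough free
// space to evacuate into.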
       
class G1YoungLengthPredictor VALUE_OBJ_CLASS_SPEC {
  const bool _during_cm;
  const double _base_time_ms;
  const double _base_free_regions;
  const double _target_pause_time_ms;
  const G1DefaultPolicy* const _policy;

 public:
  G1YoungLengthPredictor(bool during_cm,
                         double base_time_ms,
                         double base_free_regions,
                         double target_pause_time_ms,
                         const G1DefaultPolicy* policy) :
    _during_cm(during_cm),
    _base_time_ms(base_time_ms),
    _base_free_regions(base_free_regions),
    _target_pause_time_ms(target_pause_time_ms),
    _policy(policy) {}

  bool will_fit(uint young_length) const {
    if (young_length >= _base_free_regions) {
      // end condition 1: not enough space for the young regions
      return false;
    }

    const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1);
    const size_t bytes_to_copy =
                 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
    const double copy_time_ms =
      _policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm);
    const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
    const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
    if (pause_time_ms > _target_pause_time_ms) {
      // end condition 2: prediction is over the target pause time
      return false;
    }

    const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;

    // When copying, we will likely need more bytes free than is live in the region.
    // Add some safety margin to factor in the confidence of our guess, and the
    // natural expected waste.
    // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
    // of the calculation: the lower the confidence, the more headroom.
    // (100 + TargetPLABWastePct) represents the increase in expected bytes during
    // copying due to anticipated waste in the PLABs.
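    // For example, with the default values of G1ConfidencePercent (50) and
    // TargetPLABWastePct (10), the factor works out to (100 / 50) * 110 / 100
    // = 2.2, i.e. we require a bit more than twice the predicted copy volume
    // to be free. (The numbers assume the default flag values; both flags are
    // tunable.)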
       
    const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
    const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

    if (expected_bytes_to_copy > free_bytes) {
      // end condition 3: out-of-space
      return false;
    }

    // success!
    return true;
  }
};

void G1DefaultPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
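  // For example, with the default G1ReservePercent of 10, a heap of 2048
  // regions keeps ceil(204.8) = 205 regions in reserve. (10 is just the
  // assumed default; the flag can be set on the command line.)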
       
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer.heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1DefaultPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_analytics->num_alloc_rate_ms() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
}

uint G1DefaultPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer.max_desired_young_length();
}

uint G1DefaultPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}

uint G1DefaultPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1DefaultPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

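// young_list_target_lengths() computes a pair of lengths: 'first' is the
// target after clamping against the reserve and the desired min/max bounds
// (this is what gets installed as _young_list_target_length above), while
// 'second' is the unclamped value, which record_collection_pause_end() feeds
// to the IHOP control as the "unrestrained" young gen length.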
       
G1DefaultPolicy::YoungTargetLengths G1DefaultPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  const uint base_min_length = _g1->survivor_regions_count();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > base_min_length,
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                    uint base_min_length,
                                                    uint desired_min_length,
                                                    uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  const size_t pending_cards = _analytics->predict_pending_cards();
  const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
  const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
  const double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  const uint available_free_regions = _free_regions_at_end_of_collection;
  const uint base_free_regions =
    available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
                           base_time_ms,
                           base_free_regions,
                           target_pause_time_ms,
                           this);
  if (p.will_fit(min_young_length)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (p.will_fit(max_young_length)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (p.will_fit(young_length)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(p.will_fit(min_young_length),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!p.will_fit(min_young_length + 1),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1DefaultPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();

  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
       it != survivor_regions->end();
       ++it) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1DefaultPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1DefaultPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

void G1DefaultPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1DefaultPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1DefaultPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set->reset_bytes_used_before();
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
}

void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1DefaultPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1DefaultPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

double G1DefaultPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1DefaultPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
}

double G1DefaultPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
}

double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms - phase_times()->cur_collection_par_time_ms();
}

double G1DefaultPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
}

CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {
  return _collection_set->cset_chooser();
}

bool G1DefaultPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
}

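// Decides whether this allocation should trigger the start of a concurrent
// marking cycle: the current non-young occupancy plus the pending allocation
// is compared against the (possibly adaptive) IHOP threshold. As a rough
// example, with the static IHOP control and the default
// InitiatingHeapOccupancyPercent of 45, a 1024 MB heap would request a cycle
// once old/humongous usage plus the allocation exceeds about 460 MB. (The 45%
// default and the use of the static control are assumptions for the example,
// not something this function relies on.)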
       
bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
  }

  return result;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1DefaultPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
  double end_time_sec = os::elapsedTime();

  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;

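  // Feed the costs measured in this pause (per pending card, per scanned
  // card, per copied byte, per-region "other" times) back into G1Analytics;
  // these averages drive the pause time predictions used when sizing the next
  // collection set. This is skipped when the evacuation failed, as such
  // pauses are not representative.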
       
  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
      _analytics->report_cost_per_card_ms(cost_per_card_ms);
    }
    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
    if (_max_rs_lengths > recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
    }
    _analytics->report_rs_length_diff((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
    }

    if (_collection_set->young_region_length() > 0) {
      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                        _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                            _collection_set->old_region_length());
    }

    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

    _analytics->report_pending_cards((double) _pending_cards);
    _analytics->report_rs_lengths((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and thus limit the young gen every time we get to the
  // predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
                                update_rs_time_goal_ms, scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                   phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                   update_rs_time_goal_ms);

  cset_chooser()->verify();
}

G1IHOPControl* G1DefaultPolicy::create_ihop_control(const G1Predictions* predictor) {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1DefaultPolicy::update_ihop_prediction(double mutator_time_s,
                                             size_t mutator_alloc_bytes,
                                             size_t young_gen_size) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // the samples from all young gcs, not just the ones that occur while marking
  // is running. In many applications there are only a few (if any) young gcs
  // during marking, which would make a prediction based on them alone useless;
  // using all samples increases the accuracy of the prediction.
  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1DefaultPolicy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1DefaultPolicy::print_phases() {
  phase_times()->print();
}

double G1DefaultPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = _predictor.get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1DefaultPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                     size_t scanned_cards) const {
  return
    _analytics->predict_rs_update_time_ms(pending_cards) +
    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
    _analytics->predict_constant_other_time_ms();
}

double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1DefaultPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double G1DefaultPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                       bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

bool G1DefaultPolicy::should_allocate_mutator_region() const {
  uint young_list_length = _g1->young_regions_count();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length < young_list_target_length;
}

bool G1DefaultPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_regions_count();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1DefaultPolicy::adaptive_young_list_length() const {
  return _young_gen_sizer.adaptive_young_list_length();
}

size_t G1DefaultPolicy::desired_survivor_size() const {
  size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
  return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}

void G1DefaultPolicy::print_age_table() {
  _survivors_age_table.print_age_table(_tenuring_threshold);
}

void G1DefaultPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
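    // For example, with a target length of 100 regions and the default
    // GCLockerEdenExpansionPercent of 5, GCLocker-induced allocations may
    // grow eden by ceil(5.0) = 5 regions beyond the target. (5 is the assumed
    // default value of the flag.)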
       
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
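// For example, with a young target of 80 regions and the default
// SurvivorRatio of 8, up to ceil(80 / 8) = 10 survivor regions are allowed,
// and with the default TargetSurvivorRatio of 50 the tenuring threshold is
// chosen so that survivors fill roughly at most half of that capacity.
// (Both defaults are assumptions; the flags are tunable.)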
       
void G1DefaultPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
  if (UsePerfData) {
    _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
  }
}

bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1DefaultPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1DefaultPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not already be set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
      // Initiate a user requested initial mark. An initial mark must be a
      // young-only GC, so the collector state must be updated to reflect this.
      collector_state()->set_gcs_are_young(true);
      collector_state()->set_last_young_gc(false);

      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1DefaultPolicy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1->capacity());
}

void G1DefaultPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

       
  1020 G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
       
  1021   assert(!collector_state()->full_collection(), "must be");
       
  1022   if (collector_state()->during_initial_mark_pause()) {
       
  1023     assert(collector_state()->last_gc_was_young(), "must be");
       
  1024     assert(!collector_state()->last_young_gc(), "must be");
       
  1025     return InitialMarkGC;
       
  1026   } else if (collector_state()->last_young_gc()) {
       
  1027     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  1028     assert(collector_state()->last_gc_was_young(), "must be");
       
  1029     return LastYoungGC;
       
  1030   } else if (!collector_state()->last_gc_was_young()) {
       
  1031     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  1032     assert(!collector_state()->last_young_gc(), "must be");
       
  1033     return MixedGC;
       
  1034   } else {
       
  1035     assert(collector_state()->last_gc_was_young(), "must be");
       
  1036     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  1037     assert(!collector_state()->last_young_gc(), "must be");
       
  1038     return YoungOnlyGC;
       
  1039   }
       
  1040 }
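// For reference, the mapping implied by the branches above, read off the
// asserts in this function (not an authoritative table from elsewhere):
//
//   during_initial_mark_pause | last_young_gc | last_gc_was_young | PauseKind
//   true                      | false         | true              | InitialMarkGC
//   false                     | true          | true              | LastYoungGC
//   false                     | false         | false             | MixedGC
//   false                     | false         | true              | YoungOnlyGC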
       
  1041 
       
  1042 void G1DefaultPolicy::record_pause(PauseKind kind, double start, double end) {
       
  1043   // Manage the MMU tracker. For some reason it ignores Full GCs.
       
  1044   if (kind != FullGC) {
       
  1045     _mmu_tracker->add_pause(start, end);
       
  1046   }
       
  1047   // Manage the mutator time tracking from initial mark to first mixed gc.
       
  1048   switch (kind) {
       
  1049     case FullGC:
       
  1050       abort_time_to_mixed_tracking();
       
  1051       break;
       
  1052     case Cleanup:
       
  1053     case Remark:
       
  1054     case YoungOnlyGC:
       
  1055     case LastYoungGC:
       
  1056       _initial_mark_to_mixed.add_pause(end - start);
       
  1057       break;
       
  1058     case InitialMarkGC:
       
  1059       _initial_mark_to_mixed.record_initial_mark_end(end);
       
  1060       break;
       
  1061     case MixedGC:
       
  1062       _initial_mark_to_mixed.record_mixed_gc_start(start);
       
  1063       break;
       
  1064     default:
       
  1065       ShouldNotReachHere();
       
  1066   }
       
  1067 }
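// Sketch of how _initial_mark_to_mixed is driven by the switch above (an
// illustrative sequence, not taken from a real log): the InitialMarkGC case
// records the end of the initial-mark pause, each following Cleanup/Remark/
// young pause is reported via add_pause() (presumably so pause time can be
// separated from mutator time), and the first MixedGC case records its start,
// closing the initial-mark-to-first-mixed interval.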
       
  1068 
       
  1069 void G1DefaultPolicy::abort_time_to_mixed_tracking() {
       
  1070   _initial_mark_to_mixed.reset();
       
  1071 }
       
  1072 
       
  1073 bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
       
  1074                                               const char* false_action_str) const {
       
  1075   if (cset_chooser()->is_empty()) {
       
  1076     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
       
  1077     return false;
       
  1078   }
       
  1079 
       
  1080   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
       
  1081   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
       
  1082   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
       
  1083   double threshold = (double) G1HeapWastePercent;
       
  1084   if (reclaimable_percent <= threshold) {
       
  1085     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
       
  1086                         false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
       
  1087     return false;
       
  1088   }
       
  1089   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
       
  1090                       true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
       
  1091   return true;
       
  1092 }
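// Worked example with illustrative numbers, assuming the default
// G1HeapWastePercent of 5: on a heap with 10 GB of capacity the waste
// threshold corresponds to 512 MB, so 600 MB of remaining reclaimable space
// (~5.86%) keeps the mixed phase going, while 400 MB (~3.9%) ends it.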
       
  1093 
       
  1094 uint G1DefaultPolicy::calc_min_old_cset_length() const {
       
  1095   // The min old CSet region bound is based on the maximum desired
       
  1096   // number of mixed GCs after a cycle. I.e., even if some old regions
       
  1097   // look expensive, we should add them to the CSet anyway to make
       
  1098   // sure we go through the available old regions in no more than the
       
  1099   // maximum desired number of mixed GCs.
       
  1100   //
       
  1101   // The calculation is based on the number of marked regions we added
       
  1102   // to the CSet chooser in the first place, not how many remain, so
       
  1103   // that the result is the same during all mixed GCs that follow a cycle.
       
  1104 
       
  1105   const size_t region_num = (size_t) cset_chooser()->length();
       
  1106   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
       
  1107   size_t result = region_num / gc_num;
       
  1108   // emulate ceiling
       
  1109   if (result * gc_num < region_num) {
       
  1110     result += 1;
       
  1111   }
       
  1112   return (uint) result;
       
  1113 }
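// Worked example with illustrative numbers, assuming the default
// G1MixedGCCountTarget of 8: 100 regions in the cset chooser give a minimum of
// ceil(100 / 8) = 13 old regions per mixed GC, so the candidates are consumed
// within at most 8 mixed collections even if individual regions look expensive.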
       
  1114 
       
  1115 uint G1DefaultPolicy::calc_max_old_cset_length() const {
       
  1116   // The max old CSet region bound is based on the threshold expressed
       
  1117   // as a percentage of the heap size. I.e., it should bound the
       
  1118   // number of old regions added to the CSet irrespective of how many
       
  1119   // of them are available.
       
  1120 
       
  1121   const G1CollectedHeap* g1h = G1CollectedHeap::heap();
       
  1122   const size_t region_num = g1h->num_regions();
       
  1123   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
       
  1124   size_t result = region_num * perc / 100;
       
  1125   // emulate ceiling
       
  1126   if (100 * result < region_num * perc) {
       
  1127     result += 1;
       
  1128   }
       
  1129   return (uint) result;
       
  1130 }
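// Both calc_min_old_cset_length() and calc_max_old_cset_length() rely on the
// same "emulate ceiling" idiom for integer division. A minimal standalone
// sketch of that idiom, for illustration only (ceil_div is a hypothetical
// name, not a helper that exists in this file):
static size_t ceil_div(size_t num, size_t den) {
  size_t result = num / den;    // floor of the quotient
  if (result * den < num) {     // a remainder was dropped, so round up
    result += 1;
  }
  return result;
}
// With it, the bound above reads ceil_div(region_num * perc, 100); e.g. 2048
// regions and the default G1OldCSetRegionThresholdPercent of 10 would allow at
// most ceil_div(2048 * 10, 100) = 205 old regions in a single mixed collection.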
       
  1131 
       
  1132 void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
       
  1133   double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
       
  1134   _collection_set->finalize_old_part(time_remaining_ms);
       
  1135 }
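// Budget example with invented numbers: given a 200 ms target_pause_time_ms,
// if finalize_young_part() predicts roughly 150 ms for the young regions it
// returns about 50 ms, and finalize_old_part() then adds old candidate regions
// while they are predicted to fit into that remaining budget (subject to the
// min/max bounds computed above).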
       
  1136 
       
  1137 void G1DefaultPolicy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
       
  1138 
       
  1139   // Add survivor regions to SurvRateGroup.
       
  1140   note_start_adding_survivor_regions();
       
  1141   finished_recalculating_age_indexes(true /* is_survivors */);
       
  1142 
       
  1143   HeapRegion* last = NULL;
       
  1144   for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
       
  1145        it != survivors->regions()->end();
       
  1146        ++it) {
       
  1147     HeapRegion* curr = *it;
       
  1148     set_region_survivor(curr);
       
  1149 
       
  1150     // The region is a non-empty survivor so let's add it to
       
  1151     // the incremental collection set for the next evacuation
       
  1152     // pause.
       
  1153     _collection_set->add_survivor_regions(curr);
       
  1154 
       
  1155     last = curr;
       
  1156   }
       
  1157   note_stop_adding_survivor_regions();
       
  1158 
       
  1159   // Don't clear the survivor list handles until the start of
       
  1160   // the next evacuation pause - we need them in order to re-tag
       
  1161   // the survivor regions from this evacuation pause as 'young'
       
  1162   // at the start of the next.
       
  1163 
       
  1164   finished_recalculating_age_indexes(false /* is_survivors */);
       
  1165 }