hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp
changeset 34298 f3c9dcc5af96
parent 34134 d4fd14f628fb
child 34299 3fdfdda0ac1f
    26 #include "gc/g1/concurrentG1Refine.hpp"
    26 #include "gc/g1/concurrentG1Refine.hpp"
    27 #include "gc/g1/concurrentMark.hpp"
    27 #include "gc/g1/concurrentMark.hpp"
    28 #include "gc/g1/concurrentMarkThread.inline.hpp"
    28 #include "gc/g1/concurrentMarkThread.inline.hpp"
    29 #include "gc/g1/g1CollectedHeap.inline.hpp"
    29 #include "gc/g1/g1CollectedHeap.inline.hpp"
    30 #include "gc/g1/g1CollectorPolicy.hpp"
    30 #include "gc/g1/g1CollectorPolicy.hpp"
       
    31 #include "gc/g1/g1IHOPControl.hpp"
    31 #include "gc/g1/g1ErgoVerbose.hpp"
    32 #include "gc/g1/g1ErgoVerbose.hpp"
    32 #include "gc/g1/g1GCPhaseTimes.hpp"
    33 #include "gc/g1/g1GCPhaseTimes.hpp"
    33 #include "gc/g1/g1Log.hpp"
    34 #include "gc/g1/g1Log.hpp"
    34 #include "gc/g1/heapRegion.inline.hpp"
    35 #include "gc/g1/heapRegion.inline.hpp"
    35 #include "gc/g1/heapRegionRemSet.hpp"
    36 #include "gc/g1/heapRegionRemSet.hpp"
    36 #include "gc/shared/gcPolicyCounters.hpp"
    37 #include "gc/shared/gcPolicyCounters.hpp"
    37 #include "runtime/arguments.hpp"
    38 #include "runtime/arguments.hpp"
    38 #include "runtime/java.hpp"
    39 #include "runtime/java.hpp"
    39 #include "runtime/mutexLocker.hpp"
    40 #include "runtime/mutexLocker.hpp"
    40 #include "utilities/debug.hpp"
    41 #include "utilities/debug.hpp"
       
    42 #include "utilities/pair.hpp"
    41 
    43 
    42 // Different defaults for different number of GC threads
    44 // Different defaults for different number of GC threads
    43 // They were chosen by running GCOld and SPECjbb on debris with different
    45 // They were chosen by running GCOld and SPECjbb on debris with different
    44 //   numbers of GC threads and choosing them based on the results
    46 //   numbers of GC threads and choosing them based on the results
    45 
    47 
   146   _recorded_survivor_regions(0),
   148   _recorded_survivor_regions(0),
   147   _recorded_survivor_head(NULL),
   149   _recorded_survivor_head(NULL),
   148   _recorded_survivor_tail(NULL),
   150   _recorded_survivor_tail(NULL),
   149   _survivors_age_table(true),
   151   _survivors_age_table(true),
   150 
   152 
   151   _gc_overhead_perc(0.0) {
   153   _gc_overhead_perc(0.0),
       
   154 
       
   155   _bytes_allocated_in_old_since_last_gc(0),
       
   156   _ihop_control(NULL),
       
   157   _initial_mark_to_mixed() {
   152 
   158 
   153   // SurvRateGroups below must be initialized after the predictor because they
   159   // SurvRateGroups below must be initialized after the predictor because they
   154   // indirectly use it through this object passed to their constructor.
   160   // indirectly use it through this object passed to their constructor.
   155   _short_lived_surv_rate_group =
   161   _short_lived_surv_rate_group =
   156     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
   162     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
   286   _reserve_regions = 0;
   292   _reserve_regions = 0;
   287 
   293 
   288   _collectionSetChooser = new CollectionSetChooser();
   294   _collectionSetChooser = new CollectionSetChooser();
   289 }
   295 }
   290 
   296 
       
   297 G1CollectorPolicy::~G1CollectorPolicy() {
       
   298   delete _ihop_control;
       
   299 }
       
   300 
   291 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
   301 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
   292   return _predictor.get_new_prediction(seq);
   302   return _predictor.get_new_prediction(seq);
   293 }
   303 }
   294 
   304 
   295 void G1CollectorPolicy::initialize_alignments() {
   305 void G1CollectorPolicy::initialize_alignments() {
   315   uintx max_regions = G1CollectedHeap::heap()->max_regions();
   325   uintx max_regions = G1CollectedHeap::heap()->max_regions();
   316   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
   326   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
   317   if (max_young_size != MaxNewSize) {
   327   if (max_young_size != MaxNewSize) {
   318     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
   328     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
   319   }
   329   }
       
   330 
       
   331   _ihop_control = create_ihop_control();
   320 }
   332 }
   321 
   333 
   322 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
   334 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
   323 
   335 
   324 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
   336 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
   520   // constraints (i.e., user-defined minimum bound). Currently, we
   532   // constraints (i.e., user-defined minimum bound). Currently, we
   521   // effectively don't set this bound.
   533   // effectively don't set this bound.
   522   return _young_gen_sizer->max_desired_young_length();
   534   return _young_gen_sizer->max_desired_young_length();
   523 }
   535 }
   524 
   536 
   525 void G1CollectorPolicy::update_young_list_max_and_target_length() {
   537 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
   526   update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
   538   return update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
   527 }
   539 }
   528 
   540 
   529 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
   541 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
   530   update_young_list_target_length(rs_lengths);
   542   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
   531   update_max_gc_locker_expansion();
   543   update_max_gc_locker_expansion();
   532 }
   544   return unbounded_target_length;
   533 
   545 }
   534 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   546 
   535   _young_list_target_length = bounded_young_list_target_length(rs_lengths);
   547 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   536 }
   548   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
   537 
   549   _young_list_target_length = young_lengths.first;
   538 void G1CollectorPolicy::update_young_list_target_length() {
   550   return young_lengths.second;
   539   update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
   551 }
   540 }
   552 
   541 
   553 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
   542 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
   554   YoungTargetLengths result;
   543   // Calculate the absolute and desired min bounds.
   555 
       
   556   // Calculate the absolute and desired min bounds first.
   544 
   557 
   545   // This is how many young regions we already have (currently: the survivors).
   558   // This is how many young regions we already have (currently: the survivors).
   546   uint base_min_length = recorded_survivor_regions();
   559   uint base_min_length = recorded_survivor_regions();
   547   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
   560   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
    548   // This is the absolute minimum young length. Ensure that we
    561   // This is the absolute minimum young length. Ensure that we
    549   // will be able to allocate at least one eden region.
    562   // will be able to allocate at least one eden region.
    550   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
    563   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
   551   // If we shrank the young list target it should not shrink below the current size.
   564   // If we shrank the young list target it should not shrink below the current size.
   552   desired_min_length = MAX2(desired_min_length, absolute_min_length);
   565   desired_min_length = MAX2(desired_min_length, absolute_min_length);
   553   // Calculate the absolute and desired max bounds.
   566   // Calculate the absolute and desired max bounds.
   554 
   567 
   555   // We will try our best not to "eat" into the reserve.
       
   556   uint absolute_max_length = 0;
       
   557   if (_free_regions_at_end_of_collection > _reserve_regions) {
       
   558     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
       
   559   }
       
   560   uint desired_max_length = calculate_young_list_desired_max_length();
   568   uint desired_max_length = calculate_young_list_desired_max_length();
   561   if (desired_max_length > absolute_max_length) {
       
   562     desired_max_length = absolute_max_length;
       
   563   }
       
   564 
   569 
   565   uint young_list_target_length = 0;
   570   uint young_list_target_length = 0;
   566   if (adaptive_young_list_length()) {
   571   if (adaptive_young_list_length()) {
   567     if (collector_state()->gcs_are_young()) {
   572     if (collector_state()->gcs_are_young()) {
   568       young_list_target_length =
   573       young_list_target_length =
   579     // The user asked for a fixed young gen so we'll fix the young gen
   584     // The user asked for a fixed young gen so we'll fix the young gen
   580     // whether the next GC is young or mixed.
   585     // whether the next GC is young or mixed.
   581     young_list_target_length = _young_list_fixed_length;
   586     young_list_target_length = _young_list_fixed_length;
   582   }
   587   }
   583 
   588 
       
   589   result.second = young_list_target_length;
       
   590 
       
   591   // We will try our best not to "eat" into the reserve.
       
   592   uint absolute_max_length = 0;
       
   593   if (_free_regions_at_end_of_collection > _reserve_regions) {
       
   594     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
       
   595   }
       
   596   if (desired_max_length > absolute_max_length) {
       
   597     desired_max_length = absolute_max_length;
       
   598   }
       
   599 
   584   // Make sure we don't go over the desired max length, nor under the
   600   // Make sure we don't go over the desired max length, nor under the
   585   // desired min length. In case they clash, desired_min_length wins
   601   // desired min length. In case they clash, desired_min_length wins
   586   // which is why that test is second.
   602   // which is why that test is second.
   587   if (young_list_target_length > desired_max_length) {
   603   if (young_list_target_length > desired_max_length) {
   588     young_list_target_length = desired_max_length;
   604     young_list_target_length = desired_max_length;
   593 
   609 
   594   assert(young_list_target_length > recorded_survivor_regions(),
   610   assert(young_list_target_length > recorded_survivor_regions(),
   595          "we should be able to allocate at least one eden region");
   611          "we should be able to allocate at least one eden region");
   596   assert(young_list_target_length >= absolute_min_length, "post-condition");
   612   assert(young_list_target_length >= absolute_min_length, "post-condition");
   597 
   613 
   598   return young_list_target_length;
   614   result.first = young_list_target_length;
       
   615   return result;
   599 }
   616 }
   600 
   617 
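
The rewritten young_list_target_lengths threads two results out of one computation via the Pair template that the added utilities/pair.hpp include provides: .first is the clamped target the policy actually uses, .second is the target before the heap-reserve clamp, which record_collection_pause_end later forwards to the IHOP prediction. A minimal self-contained sketch of that two-value pattern, using std::pair as a stand-in for HotSpot's Pair (names and numbers here are illustrative, not from the changeset):

    #include <algorithm>
    #include <cstdio>
    #include <utility> // stand-in for HotSpot's utilities/pair.hpp

    // Mirror of YoungTargetLengths: first = bounded target, second = unrestrained.
    static std::pair<unsigned, unsigned> young_target_lengths(unsigned predicted,
                                                              unsigned reserve_cap) {
      std::pair<unsigned, unsigned> result;
      result.second = predicted;                        // before the reserve clamp
      result.first = std::min(predicted, reserve_cap);  // what the policy will use
      return result;
    }

    int main() {
      std::pair<unsigned, unsigned> t = young_target_lengths(512, 384);
      std::printf("bounded=%u unrestrained=%u\n", t.first, t.second);
      return 0;
    }
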
   601 uint
   618 uint
   602 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   619 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   603                                                      uint base_min_length,
   620                                                      uint base_min_length,
   836   // Reset survivors SurvRateGroup.
   853   // Reset survivors SurvRateGroup.
   837   _survivor_surv_rate_group->reset();
   854   _survivor_surv_rate_group->reset();
   838   update_young_list_max_and_target_length();
   855   update_young_list_max_and_target_length();
   839   update_rs_lengths_prediction();
   856   update_rs_lengths_prediction();
   840   _collectionSetChooser->clear();
   857   _collectionSetChooser->clear();
       
   858 
       
   859   _bytes_allocated_in_old_since_last_gc = 0;
       
   860 
       
   861   record_pause(FullGC, _full_collection_start_sec, end_sec);
   841 }
   862 }
   842 
   863 
   843 void G1CollectorPolicy::record_stop_world_start() {
   864 void G1CollectorPolicy::record_stop_world_start() {
   844   _stop_world_start = os::elapsedTime();
   865   _stop_world_start = os::elapsedTime();
   845 }
   866 }
   893   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   914   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   894   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   915   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   895   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   916   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   896   _prev_collection_pause_end_ms += elapsed_time_ms;
   917   _prev_collection_pause_end_ms += elapsed_time_ms;
   897 
   918 
   898   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
   919   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
   899 }
   920 }
   900 
   921 
   901 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   922 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   902   _mark_cleanup_start_sec = os::elapsedTime();
   923   _mark_cleanup_start_sec = os::elapsedTime();
   903 }
   924 }
   904 
   925 
   905 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   926 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   906   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
   927   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
   907                                                               "skip last young-only gc");
   928                                                               "skip last young-only gc");
   908   collector_state()->set_last_young_gc(should_continue_with_reclaim);
   929   collector_state()->set_last_young_gc(should_continue_with_reclaim);
       
    930   // No mixed gcs will follow, so stop timing from initial mark to mixed gc.
       
   931   if (!should_continue_with_reclaim) {
       
   932     abort_time_to_mixed_tracking();
       
   933   }
   909   collector_state()->set_in_marking_window(false);
   934   collector_state()->set_in_marking_window(false);
   910 }
   935 }
   911 
   936 
   912 void G1CollectorPolicy::record_concurrent_pause() {
   937 void G1CollectorPolicy::record_concurrent_pause() {
   913   if (_stop_world_start > 0.0) {
   938   if (_stop_world_start > 0.0) {
   950 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   975 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   951   if (about_to_start_mixed_phase()) {
   976   if (about_to_start_mixed_phase()) {
   952     return false;
   977     return false;
   953   }
   978   }
   954 
   979 
   955   size_t marking_initiating_used_threshold =
   980   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
   956     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   981 
   957   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
   982   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
   958   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   983   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   959 
   984   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
   960   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
   985 
       
   986   if (marking_request_bytes > marking_initiating_used_threshold) {
   961     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
   987     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
   962       ergo_verbose5(ErgoConcCycles,
   988       ergo_verbose5(ErgoConcCycles,
   963         "request concurrent cycle initiation",
   989         "request concurrent cycle initiation",
   964         ergo_format_reason("occupancy higher than threshold")
   990         ergo_format_reason("occupancy higher than threshold")
    965         ergo_format_byte("occupancy")
    991         ergo_format_byte("occupancy")
    966         ergo_format_byte("allocation request")
    992         ergo_format_byte("allocation request")
    967         ergo_format_byte_perc("threshold")
    993         ergo_format_byte_perc("threshold")
   968         ergo_format_str("source"),
   994         ergo_format_str("source"),
   969         cur_used_bytes,
   995         cur_used_bytes,
   970         alloc_byte_size,
   996         alloc_byte_size,
   971         marking_initiating_used_threshold,
   997         marking_initiating_used_threshold,
   972         (double) InitiatingHeapOccupancyPercent,
   998         (double) marking_initiating_used_threshold / _g1->capacity() * 100,
   973         source);
   999         source);
   974       return true;
  1000       return true;
   975     } else {
  1001     } else {
   976       ergo_verbose5(ErgoConcCycles,
  1002       ergo_verbose5(ErgoConcCycles,
   977         "do not request concurrent cycle initiation",
  1003         "do not request concurrent cycle initiation",
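
The trigger arithmetic itself is unchanged here, only its owner moved: the threshold used to be computed inline as (capacity / 100) * InitiatingHeapOccupancyPercent and now comes from the IHOP control, with the logged percentage derived back from the byte value. A worked check with assumed numbers (a 1 GB heap and the default 45 percent threshold; plain C++, not HotSpot API):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t capacity = 1024u * 1024 * 1024;            // assumed 1 GB heap
      const size_t ihop_percent = 45;                         // default InitiatingHeapOccupancyPercent
      const size_t threshold = capacity / 100 * ihop_percent; // ~461 MB

      const size_t old_gen_used = 460u * 1024 * 1024;         // non-young occupancy
      const size_t alloc_request = 8u * 1024 * 1024;          // pending allocation

      // Marking starts once occupancy plus the pending request crosses the threshold.
      bool start = old_gen_used + alloc_request > threshold;
      std::printf("threshold=%zu bytes (%.1f%%), start=%d\n",
                  threshold, (double)threshold / capacity * 100.0, start);
      return 0;
    }
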
   994 // Anything below that is considered to be zero
  1020 // Anything below that is considered to be zero
   995 #define MIN_TIMER_GRANULARITY 0.0000001
  1021 #define MIN_TIMER_GRANULARITY 0.0000001
   996 
  1022 
   997 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
  1023 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
   998   double end_time_sec = os::elapsedTime();
  1024   double end_time_sec = os::elapsedTime();
   999   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
  1025 
  1000          "otherwise, the subtraction below does not make sense");
       
  1001   size_t rs_size =
       
  1002             _cur_collection_pause_used_regions_at_start - cset_region_length();
       
  1003   size_t cur_used_bytes = _g1->used();
  1026   size_t cur_used_bytes = _g1->used();
  1004   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1027   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1005   bool last_pause_included_initial_mark = false;
  1028   bool last_pause_included_initial_mark = false;
  1006   bool update_stats = !_g1->evacuation_failed();
  1029   bool update_stats = !_g1->evacuation_failed();
  1007 
  1030 
  1011     _short_lived_surv_rate_group->print();
  1034     _short_lived_surv_rate_group->print();
  1012     // do that for any other surv rate groups too
  1035     // do that for any other surv rate groups too
  1013   }
  1036   }
  1014 #endif // PRODUCT
  1037 #endif // PRODUCT
  1015 
  1038 
       
  1039   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
       
  1040 
  1016   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  1041   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  1017   if (last_pause_included_initial_mark) {
  1042   if (last_pause_included_initial_mark) {
  1018     record_concurrent_mark_init_end(0.0);
  1043     record_concurrent_mark_init_end(0.0);
  1019   } else {
  1044   } else {
  1020     maybe_start_marking();
  1045     maybe_start_marking();
  1021   }
  1046   }
  1022 
  1047 
  1023   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
  1048   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
       
  1049   if (app_time_ms < MIN_TIMER_GRANULARITY) {
       
  1050     // This usually happens due to the timer not having the required
       
  1051     // granularity. Some Linuxes are the usual culprits.
       
  1052     // We'll just set it to something (arbitrarily) small.
       
  1053     app_time_ms = 1.0;
       
  1054   }
  1024 
  1055 
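
Hoisting app_time_ms and its granularity clamp out of the update_stats branch means the IHOP update at the end of the pause always gets a usable mutator interval, even after an evacuation failure. A compact sketch of the clamp and the rate computation it protects (all numbers illustrative):

    #include <cstdio>

    int main() {
      const double MIN_TIMER_GRANULARITY = 0.0000001; // anything below counts as zero

      double pause_start_ms = 1000.02;
      double prev_pause_end_ms = 1000.02; // back-to-back pauses: interval reads as 0
      double app_time_ms = pause_start_ms - prev_pause_end_ms;
      if (app_time_ms < MIN_TIMER_GRANULARITY) {
        // Coarse timers can report a zero-length mutator interval; substitute a
        // small value so rates derived from it stay finite.
        app_time_ms = 1.0;
      }

      double eden_allocated_bytes = 64.0 * 1024 * 1024;
      std::printf("allocation rate = %.0f bytes/ms\n", eden_allocated_bytes / app_time_ms);
      return 0;
    }
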
  1025   if (update_stats) {
  1056   if (update_stats) {
  1026     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
  1057     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
  1027     // this is where we update the allocation rate of the application
       
  1028     double app_time_ms =
       
  1029       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
       
  1030     if (app_time_ms < MIN_TIMER_GRANULARITY) {
       
  1031       // This usually happens due to the timer not having the required
       
  1032       // granularity. Some Linuxes are the usual culprits.
       
  1033       // We'll just set it to something (arbitrarily) small.
       
  1034       app_time_ms = 1.0;
       
  1035     }
       
  1036     // We maintain the invariant that all objects allocated by mutator
  1058     // We maintain the invariant that all objects allocated by mutator
  1037     // threads will be allocated out of eden regions. So, we can use
  1059     // threads will be allocated out of eden regions. So, we can use
   1038     // the number of eden regions allocated since the previous GC to
   1060     // the number of eden regions allocated since the previous GC to
   1039     // calculate the application's allocation rate. The only exception
   1061     // calculate the application's allocation rate. The only exception
  1040     // to that is humongous objects that are allocated separately. But
  1062     // to that is humongous objects that are allocated separately. But
  1075     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
  1097     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
  1076 
  1098 
  1077     if (next_gc_should_be_mixed("start mixed GCs",
  1099     if (next_gc_should_be_mixed("start mixed GCs",
  1078                                 "do not start mixed GCs")) {
  1100                                 "do not start mixed GCs")) {
  1079       collector_state()->set_gcs_are_young(false);
  1101       collector_state()->set_gcs_are_young(false);
       
  1102     } else {
       
  1103       // We aborted the mixed GC phase early.
       
  1104       abort_time_to_mixed_tracking();
  1080     }
  1105     }
  1081 
  1106 
  1082     collector_state()->set_last_young_gc(false);
  1107     collector_state()->set_last_young_gc(false);
  1083   }
  1108   }
  1084 
  1109 
  1085   if (!collector_state()->last_gc_was_young()) {
  1110   if (!collector_state()->last_gc_was_young()) {
  1086     // This is a mixed GC. Here we decide whether to continue doing
  1111     // This is a mixed GC. Here we decide whether to continue doing
  1087     // mixed GCs or not.
  1112     // mixed GCs or not.
  1088 
       
  1089     if (!next_gc_should_be_mixed("continue mixed GCs",
  1113     if (!next_gc_should_be_mixed("continue mixed GCs",
  1090                                  "do not continue mixed GCs")) {
  1114                                  "do not continue mixed GCs")) {
  1091       collector_state()->set_gcs_are_young(true);
  1115       collector_state()->set_gcs_are_young(true);
  1092 
  1116 
  1093       maybe_start_marking();
  1117       maybe_start_marking();
  1175   }
  1199   }
  1176 
  1200 
  1177   collector_state()->set_in_marking_window(new_in_marking_window);
  1201   collector_state()->set_in_marking_window(new_in_marking_window);
  1178   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  1202   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  1179   _free_regions_at_end_of_collection = _g1->num_free_regions();
  1203   _free_regions_at_end_of_collection = _g1->num_free_regions();
  1180   update_young_list_max_and_target_length();
  1204   // IHOP control wants to know the expected young gen length if it were not
       
  1205   // restrained by the heap reserve. Using the actual length would make the
       
   1206   // prediction too small and limit the young gen every time we get to the
       
  1207   // predicted target occupancy.
       
  1208   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  1181   update_rs_lengths_prediction();
  1209   update_rs_lengths_prediction();
       
  1210 
       
  1211   update_ihop_prediction(app_time_ms / 1000.0,
       
  1212                          _bytes_allocated_in_old_since_last_gc,
       
  1213                          last_unrestrained_young_length * HeapRegion::GrainBytes);
       
  1214   _bytes_allocated_in_old_since_last_gc = 0;
  1182 
  1215 
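
To make the unrestrained-length comment above concrete: suppose the predictor asks for 512 young regions but the heap reserve clamps the target to 384. Reporting the clamped figure would tell the IHOP control that only 384 regions of young allocation precede each pause, so it would start marking later than the real allocation pressure warrants. With an assumed 1 MB region size, the two figures passed as young_gen_size differ by 128 MB:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t region_bytes = 1024 * 1024; // assumed HeapRegion::GrainBytes
      const size_t unrestrained = 512;         // prediction before the reserve clamp
      const size_t clamped = 384;              // target the policy will actually use

      // IHOP must budget for the unrestrained young gen, not the clamped one.
      std::printf("ihop sees %zu MB, policy allocates %zu MB\n",
                  unrestrained * region_bytes >> 20, clamped * region_bytes >> 20);
      return 0;
    }
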
  1183   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1216   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1184   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1217   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1185 
  1218 
  1186   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
  1219   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
  1203                                update_rs_time_goal_ms);
  1236                                update_rs_time_goal_ms);
  1204 
  1237 
  1205   _collectionSetChooser->verify();
  1238   _collectionSetChooser->verify();
  1206 }
  1239 }
  1207 
  1240 
       
  1241 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
       
  1242   return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
       
  1243                                  G1CollectedHeap::heap()->max_capacity());
       
  1244 }
       
  1245 
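
The changeset wires in only the static flavor here; G1StaticIHOPControl's body is not part of this hunk. Given its constructor arguments, the threshold it hands back is presumably just the fixed percentage applied to max capacity. A guessed minimal model of that contract, not the real class:

    #include <cstddef>

    // Hypothetical stand-in for G1StaticIHOPControl: a constant occupancy cut-off.
    class StaticIHOPControl {
      double _percent;       // e.g. InitiatingHeapOccupancyPercent
      size_t _max_capacity;  // heap max capacity in bytes
     public:
      StaticIHOPControl(double percent, size_t max_capacity)
        : _percent(percent), _max_capacity(max_capacity) {}

      size_t get_conc_mark_start_threshold() const {
        // Static policy: ignore all feedback and return a fixed threshold.
        return (size_t)(_max_capacity / 100.0 * _percent);
      }

      // update_ihop_prediction feeds these so an adaptive control could learn
      // from allocation rate and marking length; a static one may ignore them.
      void update_allocation_info(double, size_t, size_t) {}
      void update_marking_length(double) {}
    };
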
       
  1246 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
       
  1247                                                size_t mutator_alloc_bytes,
       
  1248                                                size_t young_gen_size) {
       
  1249   // Always try to update IHOP prediction. Even evacuation failures give information
       
  1250   // about e.g. whether to start IHOP earlier next time.
       
  1251 
       
  1252   // Avoid using really small application times that might create samples with
       
  1253   // very high or very low values. They may be caused by e.g. back-to-back gcs.
       
  1254   double const min_valid_time = 1e-6;
       
  1255 
       
  1256   bool report = false;
       
  1257 
       
  1258   double marking_to_mixed_time = -1.0;
       
  1259   if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
       
  1260     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
       
  1261     assert(marking_to_mixed_time > 0.0,
       
  1262            "Initial mark to mixed time must be larger than zero but is %.3f",
       
  1263            marking_to_mixed_time);
       
  1264     if (marking_to_mixed_time > min_valid_time) {
       
  1265       _ihop_control->update_marking_length(marking_to_mixed_time);
       
  1266       report = true;
       
  1267     }
       
  1268   }
       
  1269 
       
  1270   // As an approximation for the young gc promotion rates during marking we use
       
  1271   // all of them. In many applications there are only a few if any young gcs during
       
   1272   // marking, which by itself would make any prediction useless. Using all young

   1273   // gcs increases the accuracy of the prediction.
       
  1274   if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
       
  1275     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
       
  1276     report = true;
       
  1277   }
       
  1278 
       
  1279   if (report) {
       
  1280     report_ihop_statistics();
       
  1281   }
       
  1282 }
       
  1283 
       
  1284 void G1CollectorPolicy::report_ihop_statistics() {
       
  1285   _ihop_control->print();
       
  1286 }
       
  1287 
  1208 #define EXT_SIZE_FORMAT "%.1f%s"
  1288 #define EXT_SIZE_FORMAT "%.1f%s"
  1209 #define EXT_SIZE_PARAMS(bytes)                                  \
  1289 #define EXT_SIZE_PARAMS(bytes)                                  \
  1210   byte_size_in_proper_unit((double)(bytes)),                    \
  1290   byte_size_in_proper_unit((double)(bytes)),                    \
  1211   proper_unit_for_byte_size((bytes))
  1291   proper_unit_for_byte_size((bytes))
   1212 
   1292 
   1213 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
   1293 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  1214   YoungList* young_list = _g1->young_list();
  1294   YoungList* young_list = _g1->young_list();
  1215   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  1295   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  1216   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  1296   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  1217   _heap_capacity_bytes_before_gc = _g1->capacity();
  1297   _heap_capacity_bytes_before_gc = _g1->capacity();
  1218   _heap_used_bytes_before_gc = _g1->used();
  1298   _heap_used_bytes_before_gc = _g1->used();
  1219   _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
       
  1220 
  1299 
  1221   _eden_capacity_bytes_before_gc =
  1300   _eden_capacity_bytes_before_gc =
  1222          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
  1301          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
  1223 
  1302 
  1224   if (full) {
  1303   if (full) {
  1715   const uint overpartition_factor = 4;
  1794   const uint overpartition_factor = 4;
  1716   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  1795   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  1717   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
  1796   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
  1718 }
  1797 }
  1719 
  1798 
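
For concreteness, the chunk sizing above with an assumed 2048 regions and 13 workers: the per-worker floor MAX2(2048 / 13, 1) = 157 exceeds the overpartitioned share 2048 / (13 * 4) = 39, so the floor decides the chunk each worker claims:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const unsigned n_regions = 2048, n_workers = 13; // assumed example sizes
      const unsigned overpartition_factor = 4;
      const unsigned min_chunk_size = std::max(n_regions / n_workers, 1u);
      const unsigned chunk = std::max(n_regions / (n_workers * overpartition_factor),
                                      min_chunk_size);
      std::printf("floor=%u chunk=%u\n", min_chunk_size, chunk); // floor=157 chunk=157
      return 0;
    }
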
  1720 void
  1799 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  1721 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
       
  1722   _collectionSetChooser->clear();
  1800   _collectionSetChooser->clear();
  1723 
  1801 
  1724   WorkGang* workers = _g1->workers();
  1802   WorkGang* workers = _g1->workers();
  1725   uint n_workers = workers->active_workers();
  1803   uint n_workers = workers->active_workers();
  1726 
  1804 
  1735   double end_sec = os::elapsedTime();
  1813   double end_sec = os::elapsedTime();
  1736   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  1814   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  1737   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  1815   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  1738   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  1816   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  1739   _prev_collection_pause_end_ms += elapsed_time_ms;
  1817   _prev_collection_pause_end_ms += elapsed_time_ms;
  1740   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
  1818 
       
  1819   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
  1741 }
  1820 }
  1742 
  1821 
  1743 // Add the heap region at the head of the non-incremental collection set
  1822 // Add the heap region at the head of the non-incremental collection set
  1744 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  1823 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  1745   assert(_inc_cset_build_state == Active, "Precondition");
  1824   assert(_inc_cset_build_state == Active, "Precondition");
  1949     // Note: this might have already been set, if during the last
  2028     // Note: this might have already been set, if during the last
  1950     // pause we decided to start a cycle but at the beginning of
  2029     // pause we decided to start a cycle but at the beginning of
  1951     // this pause we decided to postpone it. That's OK.
  2030     // this pause we decided to postpone it. That's OK.
  1952     collector_state()->set_initiate_conc_mark_if_possible(true);
  2031     collector_state()->set_initiate_conc_mark_if_possible(true);
  1953   }
  2032   }
       
  2033 }
       
  2034 
       
  2035 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
       
  2036   assert(!collector_state()->full_collection(), "must be");
       
  2037   if (collector_state()->during_initial_mark_pause()) {
       
  2038     assert(collector_state()->last_gc_was_young(), "must be");
       
  2039     assert(!collector_state()->last_young_gc(), "must be");
       
  2040     return InitialMarkGC;
       
  2041   } else if (collector_state()->last_young_gc()) {
       
  2042     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  2043     assert(collector_state()->last_gc_was_young(), "must be");
       
  2044     return LastYoungGC;
       
  2045   } else if (!collector_state()->last_gc_was_young()) {
       
  2046     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  2047     assert(!collector_state()->last_young_gc(), "must be");
       
  2048     return MixedGC;
       
  2049   } else {
       
  2050     assert(collector_state()->last_gc_was_young(), "must be");
       
  2051     assert(!collector_state()->during_initial_mark_pause(), "must be");
       
  2052     assert(!collector_state()->last_young_gc(), "must be");
       
  2053     return YoungOnlyGC;
       
  2054   }
       
  2055 }
       
  2056 
       
  2057 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
       
  2058   // Manage the MMU tracker. For some reason it ignores Full GCs.
       
  2059   if (kind != FullGC) {
       
  2060     _mmu_tracker->add_pause(start, end);
       
  2061   }
       
  2062   // Manage the mutator time tracking from initial mark to first mixed gc.
       
  2063   switch (kind) {
       
  2064     case FullGC:
       
  2065       abort_time_to_mixed_tracking();
       
  2066       break;
       
  2067     case Cleanup:
       
  2068     case Remark:
       
  2069     case YoungOnlyGC:
       
  2070     case LastYoungGC:
       
  2071       _initial_mark_to_mixed.add_pause(end - start);
       
  2072       break;
       
  2073     case InitialMarkGC:
       
  2074       _initial_mark_to_mixed.record_initial_mark_end(end);
       
  2075       break;
       
  2076     case MixedGC:
       
  2077       _initial_mark_to_mixed.record_mixed_gc_start(start);
       
  2078       break;
       
  2079     default:
       
  2080       ShouldNotReachHere();
       
  2081   }
       
  2082 }
       
  2083 
       
  2084 void G1CollectorPolicy::abort_time_to_mixed_tracking() {
       
  2085   _initial_mark_to_mixed.reset();
  1954 }
  2086 }
  1955 
  2087 
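
record_pause is now the single funnel for pause bookkeeping: every non-full pause feeds the MMU tracker, and the switch drives the initial-mark-to-mixed measurement. The tracker's implementation lives outside this hunk; a plausible minimal model, consistent with how the cases above drive it (record the initial-mark end, accumulate intervening pauses, close the measurement at the first mixed gc), might look like this:

    // Hypothetical sketch of the initial-mark-to-mixed timer; the real tracker
    // class is defined elsewhere in this change, not shown in this hunk.
    class InitialMarkToMixedTracker {
      double _initial_mark_end_sec; // set by the InitialMarkGC case
      double _paused_sec;           // young/remark/cleanup pause time in between
      double _result_sec;           // valid once the first mixed gc starts
     public:
      InitialMarkToMixedTracker() { reset(); }

      void reset() { _initial_mark_end_sec = -1.0; _paused_sec = 0.0; _result_sec = -1.0; }
      void record_initial_mark_end(double end_sec) { _initial_mark_end_sec = end_sec; }
      void add_pause(double sec) { if (_initial_mark_end_sec >= 0.0) _paused_sec += sec; }

      void record_mixed_gc_start(double start_sec) {
        if (_initial_mark_end_sec >= 0.0) {
          // Mutator time from end of initial mark to first mixed gc, pauses excluded.
          _result_sec = start_sec - _initial_mark_end_sec - _paused_sec;
          _initial_mark_end_sec = -1.0; // only the first mixed gc of a phase counts
          _paused_sec = 0.0;
        }
      }

      bool has_result() const { return _result_sec > 0.0; }
      double last_marking_time() const { return _result_sec; }
    };
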
  1956 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  2088 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  1957                                                 const char* false_action_str) const {
  2089                                                 const char* false_action_str) const {
  1958   CollectionSetChooser* cset_chooser = _collectionSetChooser;
  2090   CollectionSetChooser* cset_chooser = _collectionSetChooser;