src/hotspot/share/gc/g1/g1Policy.cpp
changeset 49607 acffe6ff3ae7
parent 49412 2c3b9dbba7bc
child 49632 64f9ebc85e67
comparing 49606:9ae8719efcae with 49607:acffe6ff3ae7
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -47,10 +47,11 @@
 #include "utilities/pair.hpp"
 
 G1Policy::G1Policy(STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
+  _remset_tracker(),
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
   _ihop_control(create_ihop_control(&_predictor)),
   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
   _young_list_fixed_length(0),
   _short_lived_surv_rate_group(new SurvRateGroup()),
@@ -64,11 +65,12 @@
   _g1(NULL),
   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
   _survivors_age_table(true),
-  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
+  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
+}
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
 }
 
@@ -411,10 +413,11 @@
 
 void G1Policy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
   // Release the future to-space so that it is available for compaction into.
   collector_state()->set_full_collection(true);
+  cset_chooser()->clear();
 }
 
 void G1Policy::record_full_collection_end() {
   // Consider this like a collection pause for the purposes of allocation
   // since last pause.
@@ -441,11 +444,10 @@
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
   record_pause(FullGC, _full_collection_start_sec, end_sec);
 }
@@ -498,17 +500,11 @@
 void G1Policy::record_concurrent_mark_cleanup_start() {
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
 void G1Policy::record_concurrent_mark_cleanup_completed() {
-  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
-                                                              "skip last young-only gc");
-  collector_state()->set_last_young_gc(should_continue_with_reclaim);
-  // We skip the marking phase.
-  if (!should_continue_with_reclaim) {
-    abort_time_to_mixed_tracking();
-  }
+  collector_state()->set_last_young_gc(collector_state()->mixed_gc_pending());
   collector_state()->set_in_marking_window(false);
 }
 
 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
   return phase_times()->average_time_ms(phase);
@@ -535,10 +531,11 @@
 CollectionSetChooser* G1Policy::cset_chooser() const {
   return _collection_set->cset_chooser();
 }
 
 bool G1Policy::about_to_start_mixed_phase() const {
+  guarantee(_g1->concurrent_mark()->cm_thread()->during_cycle() || !collector_state()->mixed_gc_pending(), "Pending mixed phase when CM is idle!");
   return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
 }
 
 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   if (about_to_start_mixed_phase()) {
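The guarantee() above and the set_last_young_gc(collector_state()->mixed_gc_pending()) call in record_concurrent_mark_cleanup_completed() both rely on a new mixed_gc_pending flag in G1CollectorState. That header is not part of this excerpt; as a rough sketch, and purely as an assumption about its shape, the flag is presumably a plain bool with the usual accessor pair:

// Sketch only: G1CollectorStateSketch is a made-up stand-in, not the real
// G1CollectorState from g1CollectorState.hpp (which this changeset excerpt does not show).
class G1CollectorStateSketch {
  bool _mixed_gc_pending;                // set once, at concurrent mark cleanup end

public:
  G1CollectorStateSketch() : _mixed_gc_pending(false) {}

  void set_mixed_gc_pending(bool v) { _mixed_gc_pending = v; }
  bool mixed_gc_pending() const     { return _mixed_gc_pending; }
};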
@@ -617,32 +614,26 @@
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
 
   if (collector_state()->last_young_gc()) {
-    // This is supposed to to be the "last young GC" before we start
-    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
-
-    if (next_gc_should_be_mixed("start mixed GCs",
-                                "do not start mixed GCs")) {
-      collector_state()->set_gcs_are_young(false);
-    } else {
-      // We aborted the mixed GC phase early.
-      abort_time_to_mixed_tracking();
-    }
-
+    // This has been the "last young GC" before we start doing mixed GCs. We already
+    // decided to start mixed GCs much earlier, so there is nothing to do except
+    // advancing the state.
+    collector_state()->set_gcs_are_young(false);
     collector_state()->set_last_young_gc(false);
   }
 
   if (!collector_state()->last_gc_was_young()) {
-    // This is a mixed GC. Here we decide whether to continue doing
+    // This is a mixed GC. Here we decide whether to continue doing more
     // mixed GCs or not.
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
       collector_state()->set_gcs_are_young(true);
 
+      clear_collection_set_candidates();
       maybe_start_marking();
     }
   }
 
   _short_lived_surv_rate_group->start_adding_regions();
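Taken together, these hunks move the "should the next phase be mixed?" decision from the last young-only pause to the end of concurrent mark cleanup (see record_concurrent_mark_cleanup_end() below); the pause-end code now only advances the collector state. A minimal, self-contained sketch of that hand-over, using simplified stand-in names rather than the real G1CollectorState flags:

// Sketch only: SimplifiedState and the three functions below are illustrative
// stand-ins, not HotSpot code.
#include <cstdio>

struct SimplifiedState {
  bool mixed_gc_pending;   // decided once, at concurrent mark cleanup end
  bool last_young_gc;      // marks the single "last young-only" pause
  bool gcs_are_young;      // false while mixed collections run

  SimplifiedState() : mixed_gc_pending(false), last_young_gc(false), gcs_are_young(true) {}
};

// Cleanup end: the only place that decides whether mixed GCs are worthwhile
// (next_gc_should_be_mixed() in the real code).
void on_cleanup_end(SimplifiedState& s, bool enough_reclaimable) {
  s.mixed_gc_pending = enough_reclaimable;
}

// Cleanup completed: no decision any more, just propagate the pending flag.
void on_cleanup_completed(SimplifiedState& s) {
  s.last_young_gc = s.mixed_gc_pending;
}

// End of the following pause: if it was the last young GC, only "advance the state".
void on_pause_end(SimplifiedState& s) {
  if (s.last_young_gc) {
    s.gcs_are_young = false;
    s.last_young_gc = false;
  }
}

int main() {
  SimplifiedState s;
  on_cleanup_end(s, true);
  on_cleanup_completed(s);
  on_pause_end(s);
  std::printf("gcs_are_young = %d\n", (int)s.gcs_are_young);  // prints 0: mixed phase begins
  return 0;
}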
@@ -969,10 +960,15 @@
       // Initiate a user requested initial mark. An initial mark must be young only
       // GC, so the collector state must be updated to reflect this.
       collector_state()->set_gcs_are_young(true);
       collector_state()->set_last_young_gc(false);
 
+      // We might have ended up coming here about to start a mixed phase with a collection set
+      // active. The following remark might change the "evacuation efficiency" of
+      // the regions in this set, leading to failing asserts later.
+      // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
+      clear_collection_set_candidates();
       abort_time_to_mixed_tracking();
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
     } else {
       // The concurrent marking thread is still finishing up the
@@ -993,20 +989,42 @@
 }
 
 void G1Policy::record_concurrent_mark_cleanup_end() {
   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 
+  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
+  if (!mixed_gc_pending) {
+    clear_collection_set_candidates();
+    abort_time_to_mixed_tracking();
+  }
+  collector_state()->set_mixed_gc_pending(mixed_gc_pending);
+
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 
   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }
 
 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
   return percent_of(reclaimable_bytes, _g1->capacity());
+}
+
+class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
+  virtual bool do_heap_region(HeapRegion* r) {
+    r->rem_set()->clear_locked(true /* only_cardset */);
+    return false;
+  }
+};
+
+void G1Policy::clear_collection_set_candidates() {
+  // Clear remembered sets of remaining candidate regions and the actual candidate
+  // list.
+  G1ClearCollectionSetCandidateRemSets cl;
+  cset_chooser()->iterate(&cl);
+  cset_chooser()->clear();
 }
 
 void G1Policy::maybe_start_marking() {
   if (need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
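The new G1ClearCollectionSetCandidateRemSets follows the usual HotSpot closure pattern: cset_chooser()->iterate(&cl) invokes do_heap_region() for each remaining candidate region, and by heap-region-iteration convention a true return value asks the caller to stop early (here the closure always returns false, so every candidate's remembered set gets cleared). A self-contained sketch of that pattern, with made-up stand-in types rather than the real HeapRegion and CollectionSetChooser classes:

// Sketch only: RegionSketch, RegionClosureSketch and iterate_candidates() are
// illustrative stand-ins, not HotSpot code.
#include <vector>

struct RegionSketch {
  bool remset_cleared;
  RegionSketch() : remset_cleared(false) {}
  void clear_remset() { remset_cleared = true; }   // stands in for rem_set()->clear_locked(true)
};

struct RegionClosureSketch {
  // Returning true signals "stop iterating"; the candidate-clearing closure
  // always returns false so that every region is visited.
  virtual bool do_heap_region(RegionSketch* r) = 0;
  virtual ~RegionClosureSketch() {}
};

struct ClearCandidateRemSetsSketch : RegionClosureSketch {
  bool do_heap_region(RegionSketch* r) {
    r->clear_remset();
    return false;
  }
};

// Stand-in for cset_chooser()->iterate(&cl); whether the real iterate() honors
// early abort is not shown in this excerpt.
void iterate_candidates(std::vector<RegionSketch>& candidates, RegionClosureSketch* cl) {
  for (size_t i = 0; i < candidates.size(); i++) {
    if (cl->do_heap_region(&candidates[i])) {
      break;   // closure asked to stop
    }
  }
}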