hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp
changeset 37039 79f62b89a7a6
parent 36577 e177c49493e9
child 37041 803d638420d5
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp
@@ -24,10 +24,11 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
@@ -113,27 +114,10 @@
 
   _recent_avg_pause_time_ratio(0.0),
   _rs_lengths_prediction(0),
   _max_survivor_regions(0),
 
-  _eden_cset_region_length(0),
-  _survivor_cset_region_length(0),
-  _old_cset_region_length(0),
-
-  _collection_set(NULL),
-  _collection_set_bytes_used_before(0),
-
-  // Incremental CSet attributes
-  _inc_cset_build_state(Inactive),
-  _inc_cset_head(NULL),
-  _inc_cset_tail(NULL),
-  _inc_cset_bytes_used_before(0),
-  _inc_cset_recorded_rs_lengths(0),
-  _inc_cset_recorded_rs_lengths_diffs(0),
-  _inc_cset_predicted_elapsed_time_ms(0.0),
-  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
-
   // add here any more surv rate groups
   _recorded_survivor_regions(0),
   _recorded_survivor_head(NULL),
   _recorded_survivor_tail(NULL),
   _survivors_age_table(true),
@@ -265,12 +249,10 @@
   }
   _reserve_factor = (double) reserve_perc / 100.0;
   // This will be set when the heap is expanded
   // for the first time during initialization.
   _reserve_regions = 0;
-
-  _cset_chooser = new CollectionSetChooser();
 
   _ihop_control = create_ihop_control();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
@@ -487,10 +469,12 @@
 
 
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
+  _collection_set = _g1->collection_set();
+  _collection_set->set_policy(this);
 
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
   initialize_gc_policy_counters();
 
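Note: the hunk above wires the policy and the new G1CollectionSet object to each other during init(). The class names and set_policy() call mirror the patch; the surrounding structure below is an illustrative stand-in, not the real HotSpot classes, just to show the direction of the wiring.

#include <cstddef>

// Illustrative sketch only: simplified stand-ins for the real classes,
// showing the mutual wiring introduced by this change.
class G1CollectorPolicy;

class G1CollectionSet {
  G1CollectorPolicy* _policy;   // set once during initialization
 public:
  G1CollectionSet() : _policy(NULL) {}
  void set_policy(G1CollectorPolicy* p) { _policy = p; }
};

class G1CollectorPolicy {
  G1CollectionSet* _collection_set;   // owned elsewhere, cached here
 public:
  G1CollectorPolicy() : _collection_set(NULL) {}
  void init(G1CollectionSet* cset) {
    _collection_set = cset;             // the policy keeps a pointer to the cset...
    _collection_set->set_policy(this);  // ...and the cset can call back into the policy
  }
};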
@@ -502,11 +486,11 @@
   _free_regions_at_end_of_collection = _g1->num_free_regions();
 
   update_young_list_max_and_target_length();
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
-  start_incremental_cset_building();
+  _collection_set->start_incremental_building();
 }
 
 void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
   phase_times()->note_gc_start(num_active_workers);
 }
@@ -911,11 +895,11 @@
          _g1->used(), _g1->recalculate_used());
 
   phase_times()->record_cur_collection_start_sec(start_time_sec);
   _pending_cards = _g1->pending_card_num();
 
-  _collection_set_bytes_used_before = 0;
+  _collection_set->reset_bytes_used_before();
   _bytes_copied_during_gc = 0;
 
   collector_state()->set_last_gc_was_young(false);
 
   // do that for any other surv rate groups
@@ -984,10 +968,14 @@
          average_time_ms(G1GCPhaseTimes::Termination);
 }
 
 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
+}
+
+CollectionSetChooser* G1CollectorPolicy::cset_chooser() const {
+  return _collection_set->cset_chooser();
 }
 
 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 }
@@ -1051,11 +1039,11 @@
     // calculate the application's allocate rate. The only exception
     // to that is humongous objects that are allocated separately. But
     // given that humongous object allocations do not really affect
     // either the pause's duration nor when the next pause will take
     // place we can safely ignore them here.
-    uint regions_allocated = eden_cset_region_length();
+    uint regions_allocated = _collection_set->eden_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _alloc_rate_ms_seq->add(alloc_rate_ms);
 
     double interval_ms =
       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
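Note: the surviving code above computes the mutator allocation rate as eden regions allocated per millisecond of application (non-GC) time, now reading the eden length from the collection set object. A small worked sketch of the arithmetic; the numbers are made up for illustration and are not from the patch.

#include <stdio.h>

// Illustrative arithmetic only: the policy divides the eden regions the
// application filled since the last pause by the elapsed application time
// to get a rate in regions/ms, which feeds its allocation predictor.
int main() {
  unsigned regions_allocated = 24;   // hypothetical eden regions filled since last pause
  double app_time_ms = 480.0;        // hypothetical mutator time between pauses
  double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  printf("allocation rate: %.3f regions/ms\n", alloc_rate_ms);  // prints 0.050
  return 0;
}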
@@ -1161,17 +1149,18 @@
     // RSet's length concurrently with other CR threads updating it
     // which might cause it to calculate the length incorrectly (if,
     // say, it's in mid-coarsening). So I'll leave in the defensive
     // conditional below just in case.
     size_t rs_length_diff = 0;
-    if (_max_rs_lengths > _recorded_rs_lengths) {
-      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
+    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
+    if (_max_rs_lengths > recorded_rs_lengths) {
+      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
     }
     _rs_length_diff_seq->add((double) rs_length_diff);
 
     size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
-    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
+    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
     double cost_per_byte_ms = 0.0;
 
     if (copied_bytes > 0) {
       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
       if (collector_state()->in_marking_window()) {
@@ -1179,18 +1168,18 @@
       } else {
         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
       }
     }
 
-    if (young_cset_region_length() > 0) {
+    if (_collection_set->young_region_length() > 0) {
       _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
-                                               young_cset_region_length());
+                                               _collection_set->young_region_length());
     }
 
-    if (old_cset_region_length() > 0) {
+    if (_collection_set->old_region_length() > 0) {
       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
-                                                   old_cset_region_length());
+                                                   _collection_set->old_region_length());
     }
 
     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
 
     _pending_cards_seq->add((double) _pending_cards);
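Note: the statistics recorded above feed G1's pause-time model. Copied bytes are derived as the collection set's recorded footprint before the pause minus the bytes the pause actually freed, and the per-byte copy cost is the averaged ObjCopy phase time spread over that amount. A small sketch of the derivation; the numbers are hypothetical.

#include <stdio.h>

// Illustrative sketch of the cost bookkeeping above (hypothetical numbers):
// bytes that survived evacuation = cset bytes before the GC - bytes freed,
// per-byte copy cost = ObjCopy phase time / surviving bytes.
int main() {
  size_t cset_bytes_used_before = 256u * 1024 * 1024;  // bytes in the collection set before the pause
  size_t heap_used_before_gc    = 900u * 1024 * 1024;
  size_t heap_used_after_gc     = 700u * 1024 * 1024;

  size_t freed_bytes  = heap_used_before_gc - heap_used_after_gc;      // 200 MB reclaimed
  size_t copied_bytes = cset_bytes_used_before - freed_bytes;          // ~56 MB survived and was copied

  double obj_copy_time_ms = 35.0;                                      // averaged ObjCopy phase time
  double cost_per_byte_ms = obj_copy_time_ms / (double) copied_bytes;  // used to predict future copy cost

  printf("copied: %zu bytes, cost: %.3e ms/byte\n", copied_bytes, cost_per_byte_ms);
  return 0;
}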
@@ -1497,21 +1486,10 @@
     region_elapsed_time_ms += predict_young_other_time_ms(1);
   } else {
     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
   }
   return region_elapsed_time_ms;
-}
-
-void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
-                                                 uint survivor_cset_region_length) {
-  _eden_cset_region_length     = eden_cset_region_length;
-  _survivor_cset_region_length = survivor_cset_region_length;
-  _old_cset_region_length      = 0;
-}
-
-void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
-  _recorded_rs_lengths = rs_lengths;
 }
 
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -1816,202 +1794,10 @@
   _prev_collection_pause_end_ms += elapsed_time_ms;
 
   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }
 
-// Add the heap region at the head of the non-incremental collection set
-void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
-  assert(_inc_cset_build_state == Active, "Precondition");
-  assert(hr->is_old(), "the region should be old");
-
-  assert(!hr->in_collection_set(), "should not already be in the CSet");
-  _g1->register_old_region_with_cset(hr);
-  hr->set_next_in_collection_set(_collection_set);
-  _collection_set = hr;
-  _collection_set_bytes_used_before += hr->used();
-  size_t rs_length = hr->rem_set()->occupied();
-  _recorded_rs_lengths += rs_length;
-  _old_cset_region_length += 1;
-}
-
-// Initialize the per-collection-set information
-void G1CollectorPolicy::start_incremental_cset_building() {
-  assert(_inc_cset_build_state == Inactive, "Precondition");
-
-  _inc_cset_head = NULL;
-  _inc_cset_tail = NULL;
-  _inc_cset_bytes_used_before = 0;
-
-  _inc_cset_recorded_rs_lengths = 0;
-  _inc_cset_recorded_rs_lengths_diffs = 0;
-  _inc_cset_predicted_elapsed_time_ms = 0.0;
-  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
-  _inc_cset_build_state = Active;
-}
-
-void G1CollectorPolicy::finalize_incremental_cset_building() {
-  assert(_inc_cset_build_state == Active, "Precondition");
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
-
-  // The two "main" fields, _inc_cset_recorded_rs_lengths and
-  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
-  // that adds a new region to the CSet. Further updates by the
-  // concurrent refinement thread that samples the young RSet lengths
-  // are accumulated in the *_diffs fields. Here we add the diffs to
-  // the "main" fields.
-
-  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
-    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
-  } else {
-    // This is defensive. The diff should in theory be always positive
-    // as RSets can only grow between GCs. However, given that we
-    // sample their size concurrently with other threads updating them
-    // it's possible that we might get the wrong size back, which
-    // could make the calculations somewhat inaccurate.
-    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
-    if (_inc_cset_recorded_rs_lengths >= diffs) {
-      _inc_cset_recorded_rs_lengths -= diffs;
-    } else {
-      _inc_cset_recorded_rs_lengths = 0;
-    }
-  }
-  _inc_cset_predicted_elapsed_time_ms +=
-                                     _inc_cset_predicted_elapsed_time_ms_diffs;
-
-  _inc_cset_recorded_rs_lengths_diffs = 0;
-  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
-}
-
-void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
-  // This routine is used when:
-  // * adding survivor regions to the incremental cset at the end of an
-  //   evacuation pause,
-  // * adding the current allocation region to the incremental cset
-  //   when it is retired, and
-  // * updating existing policy information for a region in the
-  //   incremental cset via young list RSet sampling.
-  // Therefore this routine may be called at a safepoint by the
-  // VM thread, or in-between safepoints by mutator threads (when
-  // retiring the current allocation region) or a concurrent
-  // refine thread (RSet sampling).
-
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
-  size_t used_bytes = hr->used();
-  _inc_cset_recorded_rs_lengths += rs_length;
-  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
-  _inc_cset_bytes_used_before += used_bytes;
-
-  // Cache the values we have added to the aggregated information
-  // in the heap region in case we have to remove this region from
-  // the incremental collection set, or it is updated by the
-  // rset sampling code
-  hr->set_recorded_rs_length(rs_length);
-  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
-}
-
-void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
-                                                     size_t new_rs_length) {
-  // Update the CSet information that is dependent on the new RS length
-  assert(hr->is_young(), "Precondition");
-  assert(!SafepointSynchronize::is_at_safepoint(),
-                                               "should not be at a safepoint");
-
-  // We could have updated _inc_cset_recorded_rs_lengths and
-  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
-  // that atomically, as this code is executed by a concurrent
-  // refinement thread, potentially concurrently with a mutator thread
-  // allocating a new region and also updating the same fields. To
-  // avoid the atomic operations we accumulate these updates on two
-  // separate fields (*_diffs) and we'll just add them to the "main"
-  // fields at the start of a GC.
-
-  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
-  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
-  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
-
-  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
-  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
-  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
-
-  hr->set_recorded_rs_length(new_rs_length);
-  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
-}
-
-void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
-  assert(hr->is_young(), "invariant");
-  assert(hr->young_index_in_cset() > -1, "should have already been set");
-  assert(_inc_cset_build_state == Active, "Precondition");
-
-  // We need to clear and set the cached recorded/cached collection set
-  // information in the heap region here (before the region gets added
-  // to the collection set). An individual heap region's cached values
-  // are calculated, aggregated with the policy collection set info,
-  // and cached in the heap region here (initially) and (subsequently)
-  // by the Young List sampling code.
-
-  size_t rs_length = hr->rem_set()->occupied();
-  add_to_incremental_cset_info(hr, rs_length);
-
-  assert(!hr->in_collection_set(), "invariant");
-  _g1->register_young_region_with_cset(hr);
-  assert(hr->next_in_collection_set() == NULL, "invariant");
-}
-
-// Add the region at the RHS of the incremental cset
-void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
-  // We should only ever be appending survivors at the end of a pause
-  assert(hr->is_survivor(), "Logic");
-
-  // Do the 'common' stuff
-  add_region_to_incremental_cset_common(hr);
-
-  // Now add the region at the right hand side
-  if (_inc_cset_tail == NULL) {
-    assert(_inc_cset_head == NULL, "invariant");
-    _inc_cset_head = hr;
-  } else {
-    _inc_cset_tail->set_next_in_collection_set(hr);
-  }
-  _inc_cset_tail = hr;
-}
-
-// Add the region to the LHS of the incremental cset
-void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
-  // Survivors should be added to the RHS at the end of a pause
-  assert(hr->is_eden(), "Logic");
-
-  // Do the 'common' stuff
-  add_region_to_incremental_cset_common(hr);
-
-  // Add the region at the left hand side
-  hr->set_next_in_collection_set(_inc_cset_head);
-  if (_inc_cset_head == NULL) {
-    assert(_inc_cset_tail == NULL, "Invariant");
-    _inc_cset_tail = hr;
-  }
-  _inc_cset_head = hr;
-}
-
-#ifndef PRODUCT
-void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
-  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
-
-  st->print_cr("\nCollection_set:");
-  HeapRegion* csr = list_head;
-  while (csr != NULL) {
-    HeapRegion* next = csr->next_in_collection_set();
-    assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
-                 HR_FORMAT_PARAMS(csr),
-                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
-                 csr->age_in_surv_rate_group_cond());
-    csr = next;
-  }
-}
-#endif // !PRODUCT
-
 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
   // Returns the given amount of reclaimable bytes (that represents
   // the amount of reclaimable space still to be collected) as a
   // percentage of the current heap capacity.
   size_t capacity_bytes = _g1->capacity();
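Note: the block removed above (it now lives behind the new g1CollectionSet.hpp include) maintains the incremental collection set's aggregate RS length and predicted time without atomic operations: the concurrent RS-length sampler records its corrections in separate *_diffs fields so it never races with the VM or mutator threads updating the main totals, and the diffs are folded into the totals at the next safepoint. A stripped-down sketch of that pattern follows; the types and names are simplified for illustration and are not the JDK API.

#include <cstddef>

// Illustrative sketch of the "accumulate diffs, fold in at a safepoint" pattern
// used by the removed incremental-cset bookkeeping.
struct IncCSetStats {
  size_t    recorded_rs_lengths;        // "main" total, updated when a region is added
  ptrdiff_t recorded_rs_lengths_diffs;  // corrections from concurrent RS-length sampling

  IncCSetStats() : recorded_rs_lengths(0), recorded_rs_lengths_diffs(0) {}

  // Called by the concurrent sampler between safepoints; it only touches the diff field.
  void update_sample(size_t old_rs_length, size_t new_rs_length) {
    recorded_rs_lengths_diffs += (ptrdiff_t) new_rs_length - (ptrdiff_t) old_rs_length;
  }

  // Called at the start of a GC (at a safepoint): fold the diffs into the total.
  void finalize_building() {
    if (recorded_rs_lengths_diffs >= 0) {
      recorded_rs_lengths += (size_t) recorded_rs_lengths_diffs;
    } else {
      // Defensive: a concurrent sample can come back smaller than what was recorded.
      size_t dec = (size_t) (-recorded_rs_lengths_diffs);
      recorded_rs_lengths = (recorded_rs_lengths >= dec) ? recorded_rs_lengths - dec : 0;
    }
    recorded_rs_lengths_diffs = 0;
  }
};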
@@ -2137,163 +1923,10 @@
     result += 1;
   }
   return (uint) result;
 }
 
-
-double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
-  double young_start_time_sec = os::elapsedTime();
-
-  YoungList* young_list = _g1->young_list();
-  finalize_incremental_cset_building();
-
-  guarantee(target_pause_time_ms > 0.0,
-            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
-  guarantee(_collection_set == NULL, "Precondition");
-
-  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
-  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
-
-  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
-                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
-
-  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
-
-  // The young list is laid with the survivor regions from the previous
-  // pause are appended to the RHS of the young list, i.e.
-  //   [Newly Young Regions ++ Survivors from last pause].
-
-  uint survivor_region_length = young_list->survivor_length();
-  uint eden_region_length = young_list->eden_length();
-  init_cset_region_lengths(eden_region_length, survivor_region_length);
-
-  HeapRegion* hr = young_list->first_survivor_region();
-  while (hr != NULL) {
-    assert(hr->is_survivor(), "badly formed young list");
-    // There is a convention that all the young regions in the CSet
-    // are tagged as "eden", so we do this for the survivors here. We
-    // use the special set_eden_pre_gc() as it doesn't check that the
-    // region is free (which is not the case here).
-    hr->set_eden_pre_gc();
-    hr = hr->get_next_young_region();
-  }
-
-  // Clear the fields that point to the survivor list - they are all young now.
-  young_list->clear_survivors();
-
-  _collection_set = _inc_cset_head;
-  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
-  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
-
-  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
-                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);
-
-  // The number of recorded young regions is the incremental
-  // collection set's current size
-  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
-
-  double young_end_time_sec = os::elapsedTime();
-  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
-
-  return time_remaining_ms;
-}
-
-void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
-  double non_young_start_time_sec = os::elapsedTime();
-  double predicted_old_time_ms = 0.0;
-
-
-  if (!collector_state()->gcs_are_young()) {
-    cset_chooser()->verify();
-    const uint min_old_cset_length = calc_min_old_cset_length();
-    const uint max_old_cset_length = calc_max_old_cset_length();
-
-    uint expensive_region_num = 0;
-    bool check_time_remaining = adaptive_young_list_length();
-
-    HeapRegion* hr = cset_chooser()->peek();
-    while (hr != NULL) {
-      if (old_cset_region_length() >= max_old_cset_length) {
-        // Added maximum number of old regions to the CSet.
-        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
-                                  old_cset_region_length(), max_old_cset_length);
-        break;
-      }
-
-
-      // Stop adding regions if the remaining reclaimable space is
-      // not above G1HeapWastePercent.
-      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
-      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
-      double threshold = (double) G1HeapWastePercent;
-      if (reclaimable_perc <= threshold) {
-        // We've added enough old regions that the amount of uncollected
-        // reclaimable space is at or below the waste threshold. Stop
-        // adding old regions to the CSet.
-        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
-                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
-                                  old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
-        break;
-      }
-
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
-      if (check_time_remaining) {
-        if (predicted_time_ms > time_remaining_ms) {
-          // Too expensive for the current CSet.
-
-          if (old_cset_region_length() >= min_old_cset_length) {
-            // We have added the minimum number of old regions to the CSet,
-            // we are done with this CSet.
-            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
-                                      "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
-                                      predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
-            break;
-          }
-
-          // We'll add it anyway given that we haven't reached the
-          // minimum number of old regions.
-          expensive_region_num += 1;
-        }
-      } else {
-        if (old_cset_region_length() >= min_old_cset_length) {
-          // In the non-auto-tuning case, we'll finish adding regions
-          // to the CSet if we reach the minimum.
-
-          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
-                                    old_cset_region_length(), min_old_cset_length);
-          break;
-        }
-      }
-
-      // We will add this region to the CSet.
-      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
-      predicted_old_time_ms += predicted_time_ms;
-      cset_chooser()->pop(); // already have region via peek()
-      _g1->old_set_remove(hr);
-      add_old_region_to_cset(hr);
-
-      hr = cset_chooser()->peek();
-    }
-    if (hr == NULL) {
-      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
-    }
-
-    if (expensive_region_num > 0) {
-      // We print the information once here at the end, predicated on
-      // whether we added any apparently expensive regions or not, to
-      // avoid generating output per region.
-      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
-                                "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
-                                old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
-    }
-
-    cset_chooser()->verify();
-  }
-
-  stop_incremental_cset_building();
-
-  log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
-                            old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
-
-  double non_young_end_time_sec = os::elapsedTime();
-  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
-}
+
+void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
+  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
+  _collection_set->finalize_old_part(time_remaining_ms);
+}
+
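Note: after this change the policy only delegates: finalize_young_part() returns the pause-time budget left over after the young regions, and finalize_old_part() consumes it. The removed loop above picks old candidates while the old-region count stays between the computed min and max, the remaining reclaimable space is still above G1HeapWastePercent, and the predicted per-region time fits the remaining budget. A compact standalone sketch of that selection policy follows; the function, its parameters, and the Candidate type are hypothetical simplifications (the adaptive/non-adaptive distinction and expensive-region accounting are folded into the "below the minimum, take it anyway" case), not the JDK code.

#include <cstddef>
#include <vector>

// Illustrative sketch of the old-region selection policy implemented by the
// removed finalize_old_cset_part(): take candidates while we are under the max,
// more than the waste threshold of reclaimable space remains, and (once past
// the min) the predicted evacuation time still fits the remaining budget.
struct Candidate {
  double predicted_time_ms;   // predicted evacuation time for this region
  size_t reclaimable_bytes;   // garbage that collecting it would reclaim
};

size_t select_old_regions(const std::vector<Candidate>& sorted_candidates,
                          double time_remaining_ms,
                          size_t min_regions, size_t max_regions,
                          size_t heap_capacity_bytes, double waste_percent,
                          size_t remaining_reclaimable_bytes) {
  size_t taken = 0;
  for (size_t i = 0; i < sorted_candidates.size(); ++i) {
    const Candidate& c = sorted_candidates[i];
    if (taken >= max_regions) break;                       // hard cap reached
    double reclaimable_perc =
        (double) remaining_reclaimable_bytes * 100.0 / (double) heap_capacity_bytes;
    if (reclaimable_perc <= waste_percent) break;          // leftover garbage is tolerable
    if (c.predicted_time_ms > time_remaining_ms && taken >= min_regions) {
      break;                                               // too expensive and min already met
    }
    // Below the minimum we take the region even if it exceeds the budget,
    // mirroring the "expensive region" case in the removed code.
    time_remaining_ms = (time_remaining_ms > c.predicted_time_ms)
                            ? time_remaining_ms - c.predicted_time_ms : 0.0;
    remaining_reclaimable_bytes = (remaining_reclaimable_bytes > c.reclaimable_bytes)
                                      ? remaining_reclaimable_bytes - c.reclaimable_bytes : 0;
    ++taken;
  }
  return taken;
}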