author | iveresov |
Fri, 06 Mar 2009 13:50:14 -0800 | |
changeset 2142 | 032f4652700c |
parent 2121 | 0b899b36d991 |
child 2154 | 72a9b7284ccf |
permissions | -rw-r--r-- |
1374 | 1 |
/* |
2 |
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. |
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or |
|
21 |
* have any questions. |
|
22 |
* |
|
23 |
*/ |
|
24 |
||
25 |
// A G1CollectorPolicy makes policy decisions that determine the |
|
26 |
// characteristics of the collector. Examples include: |
|
27 |
// * choice of collection set. |
|
28 |
// * when to collect. |
|
29 |
||
30 |
class HeapRegion; |
|
31 |
class CollectionSetChooser; |
|
32 |
||
33 |
// Yes, this is a bit unpleasant... but it saves replicating the same thing |
|
34 |
// over and over again and introducing subtle problems through small typos and |
|
35 |
// cutting and pasting mistakes. The macros below introduce a number
// sequence into the following two classes and the methods that access it.
|
37 |
||
38 |
// Injects, into the enclosing class, a private NumberSeq that accumulates
// millisecond samples for "name", plus a public recorder and accessor.
// Note: leaves the class in a "public:" access section.
#define define_num_seq(name)                      \
private:                                          \
  NumberSeq _all_##name##_times_ms;               \
public:                                           \
  void record_##name##_time_ms(double ms) {       \
    _all_##name##_times_ms.add(ms);               \
  }                                               \
  NumberSeq* get_##name##_seq() {                 \
    return &_all_##name##_times_ms;               \
  }
|
48 |
||
49 |
class MainBodySummary; |
|
50 |
class PopPreambleSummary; |
|
51 |
||
2013
49e915da0905
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
2009
diff
changeset
|
52 |
class PauseSummary: public CHeapObj { |
1374 | 53 |
define_num_seq(total) |
54 |
define_num_seq(other) |
|
55 |
||
56 |
public: |
|
57 |
virtual MainBodySummary* main_body_summary() { return NULL; } |
|
58 |
virtual PopPreambleSummary* pop_preamble_summary() { return NULL; } |
|
59 |
}; |
|
60 |
||
2013
49e915da0905
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
2009
diff
changeset
|
61 |
class MainBodySummary: public CHeapObj { |
1374 | 62 |
define_num_seq(satb_drain) // optional |
63 |
define_num_seq(parallel) // parallel only |
|
64 |
define_num_seq(ext_root_scan) |
|
65 |
define_num_seq(mark_stack_scan) |
|
66 |
define_num_seq(scan_only) |
|
67 |
define_num_seq(update_rs) |
|
68 |
define_num_seq(scan_rs) |
|
69 |
define_num_seq(scan_new_refs) // Only for temp use; added to |
|
70 |
// in parallel case. |
|
71 |
define_num_seq(obj_copy) |
|
72 |
define_num_seq(termination) // parallel only |
|
73 |
define_num_seq(parallel_other) // parallel only |
|
74 |
define_num_seq(mark_closure) |
|
75 |
define_num_seq(clear_ct) // parallel only |
|
76 |
}; |
|
77 |
||
2013
49e915da0905
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
2009
diff
changeset
|
78 |
class PopPreambleSummary: public CHeapObj { |
1374 | 79 |
define_num_seq(pop_preamble) |
80 |
define_num_seq(pop_update_rs) |
|
81 |
define_num_seq(pop_scan_rs) |
|
82 |
define_num_seq(pop_closure_app) |
|
83 |
define_num_seq(pop_evacuation) |
|
84 |
define_num_seq(pop_other) |
|
85 |
}; |
|
86 |
||
87 |
class NonPopSummary: public PauseSummary, |
|
88 |
public MainBodySummary { |
|
89 |
public: |
|
90 |
virtual MainBodySummary* main_body_summary() { return this; } |
|
91 |
}; |
|
92 |
||
93 |
class PopSummary: public PauseSummary, |
|
94 |
public MainBodySummary, |
|
95 |
public PopPreambleSummary { |
|
96 |
public: |
|
97 |
virtual MainBodySummary* main_body_summary() { return this; } |
|
98 |
virtual PopPreambleSummary* pop_preamble_summary() { return this; } |
|
99 |
}; |
|
100 |
||
101 |
// Summary for an abandoned non-popular pause: only the base timings apply.
class NonPopAbandonedSummary: public PauseSummary {
};
|
103 |
||
104 |
class PopAbandonedSummary: public PauseSummary, |
|
105 |
public PopPreambleSummary { |
|
106 |
public: |
|
107 |
virtual PopPreambleSummary* pop_preamble_summary() { return this; } |
|
108 |
}; |
|
109 |
||
110 |
class G1CollectorPolicy: public CollectorPolicy { |
|
111 |
protected: |
|
112 |
// The number of pauses during the execution. |
|
113 |
long _n_pauses; |
|
114 |
||
115 |
// either equal to the number of parallel threads, if ParallelGCThreads |
|
116 |
// has been set, or 1 otherwise |
|
117 |
int _parallel_gc_threads; |
|
118 |
||
119 |
enum SomePrivateConstants { |
|
120 |
NumPrevPausesForHeuristics = 10, |
|
121 |
NumPrevGCsForHeuristics = 10, |
|
122 |
NumAPIs = HeapRegion::MaxAge |
|
123 |
}; |
|
124 |
||
125 |
G1MMUTracker* _mmu_tracker; |
|
126 |
||
127 |
void initialize_flags(); |
|
128 |
||
129 |
// Runs the full policy initialization sequence, in order: command-line
// flags, heap size info, then the perm generation policy.
void initialize_all() {
  initialize_flags();
  initialize_size_info();
  initialize_perm_generation(PermGen::MarkSweepCompact);
}
|
134 |
||
135 |
// Fallback initial heap size used when none is supplied.
virtual size_t default_init_heap_size() {
  return 8*M;  // Pick some reasonable default.
}
|
139 |
||
140 |
||
141 |
double _cur_collection_start_sec; |
|
142 |
size_t _cur_collection_pause_used_at_start_bytes; |
|
143 |
size_t _cur_collection_pause_used_regions_at_start; |
|
144 |
size_t _prev_collection_pause_used_at_end_bytes; |
|
145 |
double _cur_collection_par_time_ms; |
|
146 |
double _cur_satb_drain_time_ms; |
|
147 |
double _cur_clear_ct_time_ms; |
|
148 |
bool _satb_drain_time_set; |
|
149 |
double _cur_popular_preamble_start_ms; |
|
150 |
double _cur_popular_preamble_time_ms; |
|
151 |
double _cur_popular_compute_rc_time_ms; |
|
152 |
double _cur_popular_evac_time_ms; |
|
153 |
||
154 |
double _cur_CH_strong_roots_end_sec; |
|
155 |
double _cur_CH_strong_roots_dur_ms; |
|
156 |
double _cur_G1_strong_roots_end_sec; |
|
157 |
double _cur_G1_strong_roots_dur_ms; |
|
158 |
||
159 |
// Statistics for recent GC pauses. See below for how indexed. |
|
160 |
TruncatedSeq* _recent_CH_strong_roots_times_ms; |
|
161 |
TruncatedSeq* _recent_G1_strong_roots_times_ms; |
|
162 |
TruncatedSeq* _recent_evac_times_ms; |
|
163 |
// These exclude marking times. |
|
164 |
TruncatedSeq* _recent_pause_times_ms; |
|
165 |
TruncatedSeq* _recent_gc_times_ms; |
|
166 |
||
167 |
TruncatedSeq* _recent_CS_bytes_used_before; |
|
168 |
TruncatedSeq* _recent_CS_bytes_surviving; |
|
169 |
||
170 |
TruncatedSeq* _recent_rs_sizes; |
|
171 |
||
172 |
TruncatedSeq* _concurrent_mark_init_times_ms; |
|
173 |
TruncatedSeq* _concurrent_mark_remark_times_ms; |
|
174 |
TruncatedSeq* _concurrent_mark_cleanup_times_ms; |
|
175 |
||
176 |
NonPopSummary* _non_pop_summary; |
|
177 |
PopSummary* _pop_summary; |
|
178 |
NonPopAbandonedSummary* _non_pop_abandoned_summary; |
|
179 |
PopAbandonedSummary* _pop_abandoned_summary; |
|
180 |
||
181 |
NumberSeq* _all_pause_times_ms; |
|
182 |
NumberSeq* _all_full_gc_times_ms; |
|
183 |
double _stop_world_start; |
|
184 |
NumberSeq* _all_stop_world_times_ms; |
|
185 |
NumberSeq* _all_yield_times_ms; |
|
186 |
||
187 |
size_t _region_num_young; |
|
188 |
size_t _region_num_tenured; |
|
189 |
size_t _prev_region_num_young; |
|
190 |
size_t _prev_region_num_tenured; |
|
191 |
||
192 |
NumberSeq* _all_mod_union_times_ms; |
|
193 |
||
194 |
int _aux_num; |
|
195 |
NumberSeq* _all_aux_times_ms; |
|
196 |
double* _cur_aux_start_times_ms; |
|
197 |
double* _cur_aux_times_ms; |
|
198 |
bool* _cur_aux_times_set; |
|
199 |
||
200 |
double* _par_last_ext_root_scan_times_ms; |
|
201 |
double* _par_last_mark_stack_scan_times_ms; |
|
202 |
double* _par_last_scan_only_times_ms; |
|
203 |
double* _par_last_scan_only_regions_scanned; |
|
204 |
double* _par_last_update_rs_start_times_ms; |
|
205 |
double* _par_last_update_rs_times_ms; |
|
206 |
double* _par_last_update_rs_processed_buffers; |
|
207 |
double* _par_last_scan_rs_start_times_ms; |
|
208 |
double* _par_last_scan_rs_times_ms; |
|
209 |
double* _par_last_scan_new_refs_times_ms; |
|
210 |
double* _par_last_obj_copy_times_ms; |
|
211 |
double* _par_last_termination_times_ms; |
|
212 |
||
213 |
// there are two passes during popular pauses, so we need to store
// somewhere the results of the first pass
|
215 |
double* _pop_par_last_update_rs_start_times_ms; |
|
216 |
double* _pop_par_last_update_rs_times_ms; |
|
217 |
double* _pop_par_last_update_rs_processed_buffers; |
|
218 |
double* _pop_par_last_scan_rs_start_times_ms; |
|
219 |
double* _pop_par_last_scan_rs_times_ms; |
|
220 |
double* _pop_par_last_closure_app_times_ms; |
|
221 |
||
222 |
double _pop_compute_rc_start; |
|
223 |
double _pop_evac_start; |
|
224 |
||
225 |
// indicates that we are in young GC mode |
|
226 |
bool _in_young_gc_mode; |
|
227 |
||
228 |
// indicates whether we are in full young or partially young GC mode |
|
229 |
bool _full_young_gcs; |
|
230 |
||
231 |
// if true, then it tries to dynamically adjust the length of the |
|
232 |
// young list |
|
233 |
bool _adaptive_young_list_length; |
|
234 |
size_t _young_list_min_length; |
|
235 |
size_t _young_list_target_length; |
|
236 |
size_t _young_list_so_prefix_length; |
|
237 |
size_t _young_list_fixed_length; |
|
238 |
||
239 |
size_t _young_cset_length; |
|
240 |
bool _last_young_gc_full; |
|
241 |
||
242 |
double _target_pause_time_ms; |
|
243 |
||
244 |
unsigned _full_young_pause_num; |
|
245 |
unsigned _partial_young_pause_num; |
|
246 |
||
247 |
bool _during_marking; |
|
248 |
bool _in_marking_window; |
|
249 |
bool _in_marking_window_im; |
|
250 |
||
251 |
SurvRateGroup* _short_lived_surv_rate_group; |
|
252 |
SurvRateGroup* _survivor_surv_rate_group; |
|
253 |
// add here any more surv rate groups |
|
254 |
||
255 |
bool during_marking() { |
|
256 |
return _during_marking; |
|
257 |
} |
|
258 |
||
259 |
// <NEW PREDICTION> |
|
260 |
||
261 |
private: |
|
262 |
enum PredictionConstants { |
|
263 |
TruncatedSeqLength = 10 |
|
264 |
}; |
|
265 |
||
266 |
TruncatedSeq* _alloc_rate_ms_seq; |
|
267 |
double _prev_collection_pause_end_ms; |
|
268 |
||
269 |
TruncatedSeq* _pending_card_diff_seq; |
|
270 |
TruncatedSeq* _rs_length_diff_seq; |
|
271 |
TruncatedSeq* _cost_per_card_ms_seq; |
|
272 |
TruncatedSeq* _cost_per_scan_only_region_ms_seq; |
|
273 |
TruncatedSeq* _fully_young_cards_per_entry_ratio_seq; |
|
274 |
TruncatedSeq* _partially_young_cards_per_entry_ratio_seq; |
|
275 |
TruncatedSeq* _cost_per_entry_ms_seq; |
|
276 |
TruncatedSeq* _partially_young_cost_per_entry_ms_seq; |
|
277 |
TruncatedSeq* _cost_per_byte_ms_seq; |
|
278 |
TruncatedSeq* _constant_other_time_ms_seq; |
|
279 |
TruncatedSeq* _young_other_cost_per_region_ms_seq; |
|
280 |
TruncatedSeq* _non_young_other_cost_per_region_ms_seq; |
|
281 |
||
282 |
TruncatedSeq* _pending_cards_seq; |
|
283 |
TruncatedSeq* _scanned_cards_seq; |
|
284 |
TruncatedSeq* _rs_lengths_seq; |
|
285 |
||
286 |
TruncatedSeq* _cost_per_byte_ms_during_cm_seq; |
|
287 |
TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq; |
|
288 |
||
289 |
TruncatedSeq* _young_gc_eff_seq; |
|
290 |
||
291 |
TruncatedSeq* _max_conc_overhead_seq; |
|
292 |
||
293 |
size_t _recorded_young_regions; |
|
294 |
size_t _recorded_scan_only_regions; |
|
295 |
size_t _recorded_non_young_regions; |
|
296 |
size_t _recorded_region_num; |
|
297 |
||
298 |
size_t _free_regions_at_end_of_collection; |
|
299 |
size_t _scan_only_regions_at_end_of_collection; |
|
300 |
||
301 |
size_t _recorded_rs_lengths; |
|
302 |
size_t _max_rs_lengths; |
|
303 |
||
304 |
size_t _recorded_marked_bytes; |
|
305 |
size_t _recorded_young_bytes; |
|
306 |
||
307 |
size_t _predicted_pending_cards; |
|
308 |
size_t _predicted_cards_scanned; |
|
309 |
size_t _predicted_rs_lengths; |
|
310 |
size_t _predicted_bytes_to_copy; |
|
311 |
||
312 |
double _predicted_survival_ratio; |
|
313 |
double _predicted_rs_update_time_ms; |
|
314 |
double _predicted_rs_scan_time_ms; |
|
315 |
double _predicted_scan_only_scan_time_ms; |
|
316 |
double _predicted_object_copy_time_ms; |
|
317 |
double _predicted_constant_other_time_ms; |
|
318 |
double _predicted_young_other_time_ms; |
|
319 |
double _predicted_non_young_other_time_ms; |
|
320 |
double _predicted_pause_time_ms; |
|
321 |
||
322 |
double _vtime_diff_ms; |
|
323 |
||
324 |
double _recorded_young_free_cset_time_ms; |
|
325 |
double _recorded_non_young_free_cset_time_ms; |
|
326 |
||
327 |
double _sigma; |
|
328 |
double _expensive_region_limit_ms; |
|
329 |
||
330 |
size_t _rs_lengths_prediction; |
|
331 |
||
332 |
size_t _known_garbage_bytes; |
|
333 |
double _known_garbage_ratio; |
|
334 |
||
335 |
double sigma() { |
|
336 |
return _sigma; |
|
337 |
} |
|
338 |
||
339 |
// A function that prevents us putting too much stock in small sample |
|
340 |
// sets. Returns a number between 2.0 and 1.0, depending on the number |
|
341 |
// of samples. 5 or more samples yields one; fewer scales linearly from |
|
342 |
// 2.0 at 1 sample to 1.0 at 5. |
|
343 |
double confidence_factor(int samples) { |
|
344 |
if (samples > 4) return 1.0; |
|
345 |
else return 1.0 + sigma() * ((double)(5 - samples))/2.0; |
|
346 |
} |
|
347 |
||
348 |
// Low-side prediction: the decaying average minus sigma() standard
// deviations. May be negative; callers clamp as needed.
double get_new_neg_prediction(TruncatedSeq* seq) {
  return seq->davg() - sigma() * seq->dsd();
}
|
351 |
||
352 |
#ifndef PRODUCT |
|
353 |
bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group); |
|
354 |
#endif // PRODUCT |
|
355 |
||
356 |
protected: |
|
357 |
double _pause_time_target_ms; |
|
358 |
double _recorded_young_cset_choice_time_ms; |
|
359 |
double _recorded_non_young_cset_choice_time_ms; |
|
360 |
bool _within_target; |
|
361 |
size_t _pending_cards; |
|
362 |
size_t _max_pending_cards; |
|
363 |
||
364 |
public: |
|
365 |
||
366 |
void set_region_short_lived(HeapRegion* hr) { |
|
367 |
hr->install_surv_rate_group(_short_lived_surv_rate_group); |
|
368 |
} |
|
369 |
||
370 |
void set_region_survivors(HeapRegion* hr) { |
|
371 |
hr->install_surv_rate_group(_survivor_surv_rate_group); |
|
372 |
} |
|
373 |
||
374 |
#ifndef PRODUCT |
|
375 |
bool verify_young_ages(); |
|
376 |
#endif // PRODUCT |
|
377 |
||
378 |
void tag_scan_only(size_t short_lived_scan_only_length); |
|
379 |
||
380 |
// Padded (high-side) prediction: the decaying average plus sigma()
// standard deviations, bounded below by the average scaled by the
// small-sample confidence factor.
double get_new_prediction(TruncatedSeq* seq) {
  return MAX2(seq->davg() + sigma() * seq->dsd(),
              seq->davg() * confidence_factor(seq->num()));
}
|
384 |
||
385 |
size_t young_cset_length() { |
|
386 |
return _young_cset_length; |
|
387 |
} |
|
388 |
||
389 |
void record_max_rs_lengths(size_t rs_lengths) { |
|
390 |
_max_rs_lengths = rs_lengths; |
|
391 |
} |
|
392 |
||
393 |
// Predicted pending-card correction, clamped at zero since the low-side
// prediction can come out negative.
size_t predict_pending_card_diff() {
  double prediction = get_new_neg_prediction(_pending_card_diff_seq);
  return (prediction < 0.00001) ? 0 : (size_t) prediction;
}
|
400 |
||
401 |
// Estimated number of pending cards for the next pause: the heap's
// maximum pending-card count reduced by the predicted correction
// (guarding against underflow when the correction exceeds the maximum).
size_t predict_pending_cards() {
  size_t max_pending_card_num = _g1->max_pending_card_num();
  size_t diff = predict_pending_card_diff();
  if (diff > max_pending_card_num) {
    return max_pending_card_num;
  }
  return max_pending_card_num - diff;
}
|
412 |
||
413 |
size_t predict_rs_length_diff() { |
|
414 |
return (size_t) get_new_prediction(_rs_length_diff_seq); |
|
415 |
} |
|
416 |
||
417 |
double predict_alloc_rate_ms() { |
|
418 |
return get_new_prediction(_alloc_rate_ms_seq); |
|
419 |
} |
|
420 |
||
421 |
double predict_cost_per_card_ms() { |
|
422 |
return get_new_prediction(_cost_per_card_ms_seq); |
|
423 |
} |
|
424 |
||
425 |
double predict_rs_update_time_ms(size_t pending_cards) { |
|
426 |
return (double) pending_cards * predict_cost_per_card_ms(); |
|
427 |
} |
|
428 |
||
429 |
double predict_fully_young_cards_per_entry_ratio() { |
|
430 |
return get_new_prediction(_fully_young_cards_per_entry_ratio_seq); |
|
431 |
} |
|
432 |
||
433 |
double predict_partially_young_cards_per_entry_ratio() { |
|
434 |
if (_partially_young_cards_per_entry_ratio_seq->num() < 2) |
|
435 |
return predict_fully_young_cards_per_entry_ratio(); |
|
436 |
else |
|
437 |
return get_new_prediction(_partially_young_cards_per_entry_ratio_seq); |
|
438 |
} |
|
439 |
||
440 |
size_t predict_young_card_num(size_t rs_length) { |
|
441 |
return (size_t) ((double) rs_length * |
|
442 |
predict_fully_young_cards_per_entry_ratio()); |
|
443 |
} |
|
444 |
||
445 |
size_t predict_non_young_card_num(size_t rs_length) { |
|
446 |
return (size_t) ((double) rs_length * |
|
447 |
predict_partially_young_cards_per_entry_ratio()); |
|
448 |
} |
|
449 |
||
450 |
// Predicted remembered-set scan time for "card_num" cards, picking the
// cost model for the current mode (fully vs. partially young).
double predict_rs_scan_time_ms(size_t card_num) {
  if (full_young_gcs()) {
    return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
  }
  return predict_partially_young_rs_scan_time_ms(card_num);
}
|
456 |
||
457 |
double predict_partially_young_rs_scan_time_ms(size_t card_num) { |
|
458 |
if (_partially_young_cost_per_entry_ms_seq->num() < 3) |
|
459 |
return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq); |
|
460 |
else |
|
461 |
return (double) card_num * |
|
462 |
get_new_prediction(_partially_young_cost_per_entry_ms_seq); |
|
463 |
} |
|
464 |
||
465 |
// Scan-only region cost while concurrent marking is running. With fewer
// than three marking-time samples, uses the normal per-region cost padded
// by 50% instead of the (not yet trustworthy) marking-time sequence.
double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
  if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3) {
    return 1.5 * (double) scan_only_region_num *
           get_new_prediction(_cost_per_scan_only_region_ms_seq);
  }
  return (double) scan_only_region_num *
         get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
}
|
473 |
||
474 |
double predict_scan_only_time_ms(size_t scan_only_region_num) { |
|
475 |
if (_in_marking_window_im) |
|
476 |
return predict_scan_only_time_ms_during_cm(scan_only_region_num); |
|
477 |
else |
|
478 |
return (double) scan_only_region_num * |
|
479 |
get_new_prediction(_cost_per_scan_only_region_ms_seq); |
|
480 |
} |
|
481 |
||
482 |
// Object-copy cost while concurrent marking is running. With fewer than
// three marking-time samples, uses the normal per-byte cost padded by 10%
// instead of the marking-time sequence.
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return 1.1 * (double) bytes_to_copy *
           get_new_prediction(_cost_per_byte_ms_seq);
  }
  return (double) bytes_to_copy *
         get_new_prediction(_cost_per_byte_ms_during_cm_seq);
}
|
490 |
||
491 |
// Predicted copy time for "bytes_to_copy". Uses the during-marking cost
// model when inside a marking window, except for the "_im" variant of the
// window (semantics of _in_marking_window_im not visible here — confirm).
double predict_object_copy_time_ms(size_t bytes_to_copy) {
  if (_in_marking_window && !_in_marking_window_im) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  }
  return (double) bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
}
|
498 |
||
499 |
double predict_constant_other_time_ms() { |
|
500 |
return get_new_prediction(_constant_other_time_ms_seq); |
|
501 |
} |
|
502 |
||
503 |
double predict_young_other_time_ms(size_t young_num) { |
|
504 |
return |
|
505 |
(double) young_num * |
|
506 |
get_new_prediction(_young_other_cost_per_region_ms_seq); |
|
507 |
} |
|
508 |
||
509 |
double predict_non_young_other_time_ms(size_t non_young_num) { |
|
510 |
return |
|
511 |
(double) non_young_num * |
|
512 |
get_new_prediction(_non_young_other_cost_per_region_ms_seq); |
|
513 |
} |
|
514 |
||
515 |
void check_if_region_is_too_expensive(double predicted_time_ms); |
|
516 |
||
517 |
double predict_young_collection_elapsed_time_ms(size_t adjustment); |
|
518 |
double predict_base_elapsed_time_ms(size_t pending_cards); |
|
519 |
double predict_base_elapsed_time_ms(size_t pending_cards, |
|
520 |
size_t scanned_cards); |
|
521 |
size_t predict_bytes_to_copy(HeapRegion* hr); |
|
522 |
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); |
|
523 |
||
524 |
// for use by: calculate_optimal_so_length(length) |
|
525 |
void predict_gc_eff(size_t young_region_num, |
|
526 |
size_t so_length, |
|
527 |
double base_time_ms, |
|
528 |
double *gc_eff, |
|
529 |
double *pause_time_ms); |
|
530 |
||
531 |
// for use by: calculate_young_list_target_config(rs_length) |
|
532 |
bool predict_gc_eff(size_t young_region_num, |
|
533 |
size_t so_length, |
|
534 |
double base_time_with_so_ms, |
|
535 |
size_t init_free_regions, |
|
536 |
double target_pause_time_ms, |
|
537 |
double* gc_eff); |
|
538 |
||
539 |
void start_recording_regions(); |
|
540 |
void record_cset_region(HeapRegion* hr, bool young); |
|
541 |
void record_scan_only_regions(size_t scan_only_length); |
|
542 |
void end_recording_regions(); |
|
543 |
||
544 |
void record_vtime_diff_ms(double vtime_diff_ms) { |
|
545 |
_vtime_diff_ms = vtime_diff_ms; |
|
546 |
} |
|
547 |
||
548 |
void record_young_free_cset_time_ms(double time_ms) { |
|
549 |
_recorded_young_free_cset_time_ms = time_ms; |
|
550 |
} |
|
551 |
||
552 |
void record_non_young_free_cset_time_ms(double time_ms) { |
|
553 |
_recorded_non_young_free_cset_time_ms = time_ms; |
|
554 |
} |
|
555 |
||
556 |
double predict_young_gc_eff() { |
|
557 |
return get_new_neg_prediction(_young_gc_eff_seq); |
|
558 |
} |
|
559 |
||
2009 | 560 |
double predict_survivor_regions_evac_time(); |
561 |
||
1374 | 562 |
// </NEW PREDICTION> |
563 |
||
564 |
public: |
|
565 |
void cset_regions_freed() { |
|
566 |
bool propagate = _last_young_gc_full && !_in_marking_window; |
|
567 |
_short_lived_surv_rate_group->all_surviving_words_recorded(propagate); |
|
568 |
_survivor_surv_rate_group->all_surviving_words_recorded(propagate); |
|
569 |
// also call it on any more surv rate groups |
|
570 |
} |
|
571 |
||
572 |
// Records the absolute amount of known garbage and refreshes the ratio
// of known garbage to current heap capacity.
void set_known_garbage_bytes(size_t known_garbage_bytes) {
  _known_garbage_bytes = known_garbage_bytes;
  _known_garbage_ratio =
    (double) _known_garbage_bytes / (double) _g1->capacity();
}
|
577 |
||
578 |
// Subtracts reclaimed garbage from the running total (which must not
// underflow) and refreshes the garbage-to-capacity ratio.
void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
  guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
  _known_garbage_bytes -= known_garbage_bytes;
  _known_garbage_ratio =
    (double) _known_garbage_bytes / (double) _g1->capacity();
}
|
585 |
||
586 |
G1MMUTracker* mmu_tracker() { |
|
587 |
return _mmu_tracker; |
|
588 |
} |
|
589 |
||
590 |
double predict_init_time_ms() { |
|
591 |
return get_new_prediction(_concurrent_mark_init_times_ms); |
|
592 |
} |
|
593 |
||
594 |
double predict_remark_time_ms() { |
|
595 |
return get_new_prediction(_concurrent_mark_remark_times_ms); |
|
596 |
} |
|
597 |
||
598 |
double predict_cleanup_time_ms() { |
|
599 |
return get_new_prediction(_concurrent_mark_cleanup_times_ms); |
|
600 |
} |
|
601 |
||
602 |
// Returns an estimate of the survival rate of the region at yg-age |
|
603 |
// "yg_age". |
|
2009 | 604 |
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) { |
605 |
TruncatedSeq* seq = surv_rate_group->get_seq(age); |
|
1374 | 606 |
if (seq->num() == 0) |
607 |
gclog_or_tty->print("BARF! age is %d", age); |
|
608 |
guarantee( seq->num() > 0, "invariant" ); |
|
609 |
double pred = get_new_prediction(seq); |
|
610 |
if (pred > 1.0) |
|
611 |
pred = 1.0; |
|
612 |
return pred; |
|
613 |
} |
|
614 |
||
2009 | 615 |
double predict_yg_surv_rate(int age) { |
616 |
return predict_yg_surv_rate(age, _short_lived_surv_rate_group); |
|
617 |
} |
|
618 |
||
1374 | 619 |
double accum_yg_surv_rate_pred(int age) { |
620 |
return _short_lived_surv_rate_group->accum_surv_rate_pred(age); |
|
621 |
} |
|
622 |
||
623 |
protected: |
|
624 |
void print_stats (int level, const char* str, double value); |
|
625 |
void print_stats (int level, const char* str, int value); |
|
626 |
void print_par_stats (int level, const char* str, double* data) { |
|
627 |
print_par_stats(level, str, data, true); |
|
628 |
} |
|
629 |
void print_par_stats (int level, const char* str, double* data, bool summary); |
|
630 |
void print_par_buffers (int level, const char* str, double* data, bool summary); |
|
631 |
||
632 |
void check_other_times(int level, |
|
633 |
NumberSeq* other_times_ms, |
|
634 |
NumberSeq* calc_other_times_ms) const; |
|
635 |
||
636 |
void print_summary (PauseSummary* stats) const; |
|
637 |
void print_abandoned_summary(PauseSummary* non_pop_summary, |
|
638 |
PauseSummary* pop_summary) const; |
|
639 |
||
640 |
void print_summary (int level, const char* str, NumberSeq* seq) const; |
|
641 |
void print_summary_sd (int level, const char* str, NumberSeq* seq) const; |
|
642 |
||
643 |
double avg_value (double* data); |
|
644 |
double max_value (double* data); |
|
645 |
double sum_of_values (double* data); |
|
646 |
double max_sum (double* data1, double* data2); |
|
647 |
||
648 |
int _last_satb_drain_processed_buffers; |
|
649 |
int _last_update_rs_processed_buffers; |
|
650 |
double _last_pause_time_ms; |
|
651 |
||
652 |
size_t _bytes_in_to_space_before_gc; |
|
653 |
size_t _bytes_in_to_space_after_gc; |
|
654 |
size_t bytes_in_to_space_during_gc() { |
|
655 |
return |
|
656 |
_bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc; |
|
657 |
} |
|
658 |
size_t _bytes_in_collection_set_before_gc; |
|
659 |
// Used to count used bytes in CS. |
|
660 |
friend class CountCSClosure; |
|
661 |
||
662 |
// Statistics kept per GC stoppage, pause or full. |
|
663 |
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; |
|
664 |
||
665 |
// We track markings. |
|
666 |
int _num_markings; |
|
667 |
double _mark_thread_startup_sec; // Time at startup of marking thread |
|
668 |
||
669 |
// Add a new GC of the given duration and end time to the record. |
|
670 |
void update_recent_gc_times(double end_time_sec, double elapsed_ms); |
|
671 |
||
672 |
// The head of the list (via "next_in_collection_set()") representing the |
|
673 |
// current collection set. |
|
674 |
HeapRegion* _collection_set; |
|
675 |
size_t _collection_set_size; |
|
676 |
size_t _collection_set_bytes_used_before; |
|
677 |
||
678 |
// Info about marking. |
|
679 |
int _n_marks; // Sticky at 2, so we know when we've done at least 2. |
|
680 |
||
681 |
// The number of collection pauses at the end of the last mark. |
|
682 |
size_t _n_pauses_at_mark_end; |
|
683 |
||
684 |
// ==== This section is for stats related to starting Conc Refinement on time. |
|
685 |
size_t _conc_refine_enabled; |
|
686 |
size_t _conc_refine_zero_traversals; |
|
687 |
size_t _conc_refine_max_traversals; |
|
688 |
// In # of heap regions. |
|
689 |
size_t _conc_refine_current_delta; |
|
690 |
||
691 |
// At the beginning of a collection pause, update the variables above, |
|
692 |
// especially the "delta". |
|
693 |
void update_conc_refine_data(); |
|
694 |
// ==== |
|
695 |
||
696 |
// Stash a pointer to the g1 heap. |
|
697 |
G1CollectedHeap* _g1; |
|
698 |
||
699 |
// The average time in ms per collection pause, averaged over recent pauses. |
|
700 |
double recent_avg_time_for_pauses_ms(); |
|
701 |
||
702 |
// The average time in ms for processing CollectedHeap strong roots, per |
|
703 |
// collection pause, averaged over recent pauses. |
|
704 |
double recent_avg_time_for_CH_strong_ms(); |
|
705 |
||
706 |
// The average time in ms for processing the G1 remembered set, per |
|
707 |
// pause, averaged over recent pauses. |
|
708 |
double recent_avg_time_for_G1_strong_ms(); |
|
709 |
||
710 |
// The average time in ms for "evacuating followers", per pause, averaged |
|
711 |
// over recent pauses. |
|
712 |
double recent_avg_time_for_evac_ms(); |
|
713 |
||
714 |
// The number of "recent" GCs recorded in the number sequences |
|
715 |
int number_of_recent_gcs(); |
|
716 |
||
717 |
// The average survival ratio, computed by the total number of bytes |
|
718 |
// surviving / total number of bytes before collection over the last
|
719 |
// several recent pauses. |
|
720 |
double recent_avg_survival_fraction(); |
|
721 |
// The survival fraction of the most recent pause; if there have been no |
|
722 |
// pauses, returns 1.0. |
|
723 |
double last_survival_fraction(); |
|
724 |
||
725 |
// Returns a "conservative" estimate of the recent survival rate, i.e., |
|
726 |
// one that may be higher than "recent_avg_survival_fraction". |
|
727 |
// This is conservative in several ways: |
|
728 |
// If there have been few pauses, it will assume a potential high |
|
729 |
// variance, and err on the side of caution. |
|
730 |
// It puts a lower bound (currently 0.1) on the value it will return. |
|
731 |
// To try to detect phase changes, if the most recent pause ("latest") has a |
|
732 |
// higher-than average ("avg") survival rate, it returns that rate. |
|
733 |
// "work" version is a utility function; young is restricted to young regions. |
|
734 |
double conservative_avg_survival_fraction_work(double avg, |
|
735 |
double latest); |
|
736 |
||
737 |
// The arguments are the two sequences that keep track of the number of bytes |
|
738 |
// surviving and the total number of bytes before collection, resp., |
|
739 |
// over the last several recent pauses
|
740 |
// Returns the survival rate for the category in the most recent pause. |
|
741 |
// If there have been no pauses, returns 1.0. |
|
742 |
double last_survival_fraction_work(TruncatedSeq* surviving, |
|
743 |
TruncatedSeq* before); |
|
744 |
||
745 |
// The arguments are the two sequences that keep track of the number of bytes |
|
746 |
// surviving and the total number of bytes before collection, resp., |
|
747 |
// over the last several recent pauses |
|
748 |
// Returns the average survival ratio over the last several recent pauses
|
749 |
// If there have been no pauses, return 1.0 |
|
750 |
double recent_avg_survival_fraction_work(TruncatedSeq* surviving, |
|
751 |
TruncatedSeq* before); |
|
752 |
||
753 |
double conservative_avg_survival_fraction() { |
|
754 |
double avg = recent_avg_survival_fraction(); |
|
755 |
double latest = last_survival_fraction(); |
|
756 |
return conservative_avg_survival_fraction_work(avg, latest); |
|
757 |
} |
|
758 |
||
759 |
// The ratio of gc time to elapsed time, computed over recent pauses. |
|
760 |
double _recent_avg_pause_time_ratio; |
|
761 |
||
762 |
double recent_avg_pause_time_ratio() { |
|
763 |
return _recent_avg_pause_time_ratio; |
|
764 |
} |
|
765 |
||
766 |
// Number of pauses between concurrent marking. |
|
767 |
size_t _pauses_btwn_concurrent_mark; |
|
768 |
||
769 |
size_t _n_marks_since_last_pause; |
|
770 |
||
771 |
// True iff CM has been initiated. |
|
772 |
bool _conc_mark_initiated; |
|
773 |
||
774 |
// True iff CM should be initiated |
|
775 |
bool _should_initiate_conc_mark; |
|
776 |
bool _should_revert_to_full_young_gcs; |
|
777 |
bool _last_full_young_gc; |
|
778 |
||
779 |
// This set of variables tracks the collector efficiency, in order to |
|
780 |
// determine whether we should initiate a new marking. |
|
781 |
double _cur_mark_stop_world_time_ms; |
|
782 |
double _mark_init_start_sec; |
|
783 |
double _mark_remark_start_sec; |
|
784 |
double _mark_cleanup_start_sec; |
|
785 |
double _mark_closure_time_ms; |
|
786 |
||
787 |
void calculate_young_list_min_length(); |
|
788 |
void calculate_young_list_target_config(); |
|
789 |
void calculate_young_list_target_config(size_t rs_lengths); |
|
790 |
size_t calculate_optimal_so_length(size_t young_list_length); |
|
791 |
||
792 |
public:

G1CollectorPolicy();

// This policy is itself the G1 policy (downcast helper).
virtual G1CollectorPolicy* as_g1_policy() { return this; }

virtual CollectorPolicy::Name kind() {
  return CollectorPolicy::G1CollectorPolicyKind;
}

// Re-validates the pause-prediction state (defined in the .cpp).
void check_prediction_validity();

// Bytes that were in the collection set before the GC.
size_t bytes_in_collection_set() {
  return _bytes_in_collection_set_before_gc;
}

size_t bytes_in_to_space() {
  return bytes_in_to_space_during_gc();
}

// Time stamp for GC allocation regions: one more than the number of
// pause times recorded so far.
unsigned calc_gc_alloc_time_stamp() {
  return _all_pause_times_ms->num() + 1;
}

protected:

// Count the number of bytes used in the CS.
void count_CS_bytes_used();

// Together these do the base cleanup-recording work.  Subclasses might
// want to put something between them.
void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                              size_t max_live_bytes);
void record_concurrent_mark_cleanup_end_work2();

public:

virtual void init();

// Create jstat counters for the policy.
virtual void initialize_gc_policy_counters();
|
833 |
||
1374 | 834 |
virtual HeapWord* mem_allocate_work(size_t size,
                                    bool is_tlab,
                                    bool* gc_overhead_limit_was_exceeded);

// This method controls how a collector handles one or more
// of its generations being fully allocated.
virtual HeapWord* satisfy_failed_allocation(size_t size,
                                            bool is_tlab);

// G1 uses the SATB card-table-logging barrier with a card-table rem set.
BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

// The number of collection pauses so far.
long n_pauses() const { return _n_pauses; }

// Update the heuristic info to record a collection pause of the given
// start time, where the given number of bytes were used at the start.
// This may involve changing the desired size of a collection set.

virtual void record_stop_world_start();

virtual void record_collection_pause_start(double start_time_sec,
                                           size_t start_used);

// Hooks around the preamble of a popularity pause.
virtual void record_popular_pause_preamble_start();
virtual void record_popular_pause_preamble_end();

// Must currently be called while the world is stopped.
virtual void record_concurrent_mark_init_start();
virtual void record_concurrent_mark_init_end();
void record_concurrent_mark_init_end_pre(double
                                         mark_init_elapsed_time_ms);

void record_mark_closure_time(double mark_closure_time_ms);

// Start/end hooks for the concurrent-mark remark phase.
virtual void record_concurrent_mark_remark_start();
virtual void record_concurrent_mark_remark_end();

// Start/end/completed hooks for the concurrent-mark cleanup phase.
virtual void record_concurrent_mark_cleanup_start();
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                size_t max_live_bytes);
virtual void record_concurrent_mark_cleanup_completed();

virtual void record_concurrent_pause();
virtual void record_concurrent_pause_end();

// Sub-phase ends for strong-roots processing during a pause.
virtual void record_collection_pause_end_CH_strong_roots();
virtual void record_collection_pause_end_G1_strong_roots();

virtual void record_collection_pause_end(bool popular, bool abandoned);

// Record the fact that a full collection occurred.
virtual void record_full_collection_start();
virtual void record_full_collection_end();
|
889 |
||
890 |
// Per-worker timers for the parallel phases of a pause.  Unless noted,
// each setter overwrites the previous value for that worker/thread.

void record_ext_root_scan_time(int worker_i, double ms) {
  _par_last_ext_root_scan_times_ms[worker_i] = ms;
}

void record_mark_stack_scan_time(int worker_i, double ms) {
  _par_last_mark_stack_scan_times_ms[worker_i] = ms;
}

// Records both the elapsed time and the number of regions scanned.
void record_scan_only_time(int worker_i, double ms, int n) {
  _par_last_scan_only_times_ms[worker_i] = ms;
  _par_last_scan_only_regions_scanned[worker_i] = (double) n;
}

void record_satb_drain_time(double ms) {
  _cur_satb_drain_time_ms = ms;
  // The flag lets consumers distinguish "0.0 ms" from "never recorded".
  _satb_drain_time_set = true;
}

void record_satb_drain_processed_buffers (int processed_buffers) {
  _last_satb_drain_processed_buffers = processed_buffers;
}

void record_mod_union_time(double ms) {
  _all_mod_union_times_ms->add(ms);
}

void record_update_rs_start_time(int thread, double ms) {
  _par_last_update_rs_start_times_ms[thread] = ms;
}

void record_update_rs_time(int thread, double ms) {
  _par_last_update_rs_times_ms[thread] = ms;
}

void record_update_rs_processed_buffers (int thread,
                                         double processed_buffers) {
  _par_last_update_rs_processed_buffers[thread] = processed_buffers;
}

void record_scan_rs_start_time(int thread, double ms) {
  _par_last_scan_rs_start_times_ms[thread] = ms;
}

void record_scan_rs_time(int thread, double ms) {
  _par_last_scan_rs_times_ms[thread] = ms;
}

void record_scan_new_refs_time(int thread, double ms) {
  _par_last_scan_new_refs_times_ms[thread] = ms;
}

double get_scan_new_refs_time(int thread) {
  return _par_last_scan_new_refs_times_ms[thread];
}

void reset_obj_copy_time(int thread) {
  _par_last_obj_copy_times_ms[thread] = 0.0;
}

// Serial convenience overload (thread 0).
void reset_obj_copy_time() {
  reset_obj_copy_time(0);
}

// Accumulates (note +=, unlike the other recorders).
void record_obj_copy_time(int thread, double ms) {
  _par_last_obj_copy_times_ms[thread] += ms;
}

// Serial convenience overload (thread 0).
void record_obj_copy_time(double ms) {
  record_obj_copy_time(0, ms);
}

void record_termination_time(int thread, double ms) {
  _par_last_termination_times_ms[thread] = ms;
}

// Serial convenience overload (thread 0).
void record_termination_time(double ms) {
  record_termination_time(0, ms);
}

void record_pause_time_ms(double ms) {
  _last_pause_time_ms = ms;
}

void record_clear_ct_time(double ms) {
  _cur_clear_ct_time_ms = ms;
}

void record_par_time(double ms) {
  _cur_collection_par_time_ms = ms;
}

// Auxiliary timers: start/end pairs that accumulate elapsed wall-clock
// time (in ms) into _cur_aux_times_ms[i].
void record_aux_start_time(int i) {
  guarantee(i < _aux_num, "should be within range");
  _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
}

void record_aux_end_time(int i) {
  guarantee(i < _aux_num, "should be within range");
  double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
  _cur_aux_times_set[i] = true;
  _cur_aux_times_ms[i] += ms;
}
|
992 |
||
993 |
// Popularity-pause sub-phase hooks (reference counting and evacuation).
void record_pop_compute_rc_start();
void record_pop_compute_rc_end();

void record_pop_evac_start();
void record_pop_evac_end();

// Record the fact that "bytes" bytes allocated in a region.
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);

// Returns "true" if this is a good time to do a collection pause.
// The "word_size" argument, if non-zero, indicates the size of an
// allocation request that is prompting this query.
virtual bool should_do_collection_pause(size_t word_size) = 0;

// Choose a new collection set.  Marks the chosen regions as being
// "in_collection_set", and links them together.  The head and number of
// the collection set are available via access methods.
// If "pop_region" is non-NULL, it is a popular region that has already
// been added to the collection set.
virtual void choose_collection_set(HeapRegion* pop_region = NULL) = 0;

// Forget the current collection set (does not touch the regions themselves).
void clear_collection_set() { _collection_set = NULL; }

// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
HeapRegion* collection_set() { return _collection_set; }

// Sets the collection set to the given single region.
virtual void set_single_region_collection_set(HeapRegion* hr);

// The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; }

// Add "hr" to the CS.
void add_to_collection_set(HeapRegion* hr);

// Query/set/clear the "should initiate concurrent mark" flag.
bool should_initiate_conc_mark() { return _should_initiate_conc_mark; }
void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; }
void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }

void checkpoint_conc_overhead();

// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();

// note start of mark thread
void note_start_of_mark_thread();

// The marked bytes of region "r" have changed; reclassify its desirability
// for marking.  Also asserts that "r" is eligible for a CS.
virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
|
1046 |
||
1047 |
#ifndef PRODUCT
// Check any appropriate marked bytes info, asserting false if
// something's wrong, else returning "true".
virtual bool assertMarkedBytesDataOK() = 0;
#endif

// Print tracing information.
void print_tracing_info() const;

// Print stats on young survival ratio
void print_yg_surv_rate_info() const;

// Notify the appropriate surv rate group (survivor or short-lived) that
// its age-index recalculation has completed.
void finished_recalculating_age_indexes(bool is_survivors) {
  if (is_survivors) {
    _survivor_surv_rate_group->finished_recalculating_age_indexes();
  } else {
    _short_lived_surv_rate_group->finished_recalculating_age_indexes();
  }
  // do that for any other surv rate groups
}
|
1067 |
||
1068 |
bool should_add_next_region_to_young_list();

// Whether the policy is currently operating in "young GC" mode.
bool in_young_gc_mode() {
  return _in_young_gc_mode;
}
void set_in_young_gc_mode(bool in_young_gc_mode) {
  _in_young_gc_mode = in_young_gc_mode;
}

// Whether we are doing fully-young GCs (as opposed to partially-young).
bool full_young_gcs() {
  return _full_young_gcs;
}
void set_full_young_gcs(bool full_young_gcs) {
  _full_young_gcs = full_young_gcs;
}

// Whether the young list length is sized adaptively.
bool adaptive_young_list_length() {
  return _adaptive_young_list_length;
}
void set_adaptive_young_list_length(bool adaptive_young_list_length) {
  _adaptive_young_list_length = adaptive_young_list_length;
}

// Efficiency scale factor derived from the known-garbage ratio:
// 1.0 when there is no garbage, approaching 10.0 as the ratio nears 1.0.
// NOTE(review): the guarantee below is strict (ret < 10.0) and would fire
// if _known_garbage_ratio ever reached exactly 1.0 -- presumably it stays
// below 1.0; confirm where the ratio is computed.
inline double get_gc_eff_factor() {
  double ratio = _known_garbage_ratio;

  double square = ratio * ratio;
  // square = square * square;
  double ret = square * 9.0 + 1.0;
#if 0
  gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
  guarantee(0.0 <= ret && ret < 10.0, "invariant!");
  return ret;
}
|
1103 |
||
1104 |
//
// Survivor regions policy.
//
protected:

// Current tenuring threshold, set to 0 if the collector reaches the
// maximum number of survivor regions.
int _tenuring_threshold;

// The limit on the number of regions allocated for survivors.
size_t _max_survivor_regions;

// The number of survivor regions after a collection.
size_t _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;

// Merged age table of the survivors, filled in per GC thread
// (see record_thread_age_table).
ageTable _survivors_age_table;
|
1123 |
||
1374 | 1124 |
public:

// Chooses the evacuation destination for an object of the given age
// coming from "src_region": survivor space while the object is below the
// tenuring threshold and the source region is young, otherwise tenured.
inline GCAllocPurpose
evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
  if (age < _tenuring_threshold && src_region->is_young()) {
    return GCAllocForSurvived;
  } else {
    return GCAllocForTenured;
  }
}

// Only objects copied to survivor space have their age tracked.
inline bool track_object_age(GCAllocPurpose purpose) {
  return purpose == GCAllocForSurvived;
}

// Fallback purpose when allocation for "purpose" fails: always tenured.
inline GCAllocPurpose alternative_purpose(int purpose) {
  return GCAllocForTenured;
}

// Sentinel meaning "no limit on the number of regions".
static const size_t REGIONS_UNLIMITED = ~(size_t)0;

size_t max_regions(int purpose);

// The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) {
  if (purpose == GCAllocForSurvived) {
    // No more survivor regions: disable tenuring-threshold-based copying.
    _tenuring_threshold = 0;
  }
}

void note_start_adding_survivor_regions() {
  _survivor_surv_rate_group->start_adding_regions();
}

void note_stop_adding_survivor_regions() {
  _survivor_surv_rate_group->stop_adding_regions();
}

// Record the survivor region list (count, head, tail) of the last pause.
void record_survivor_regions(size_t regions,
                             HeapRegion* head,
                             HeapRegion* tail) {
  _recorded_survivor_regions = regions;
  _recorded_survivor_head = head;
  _recorded_survivor_tail = tail;
}

// Merge a per-thread age table into the global survivors age table.
void record_thread_age_table(ageTable* age_table)
{
  _survivors_age_table.merge_par(age_table);
}

// Calculates survivor space parameters.
void calculate_survivors_policy();

};
1179 |
||
1180 |
// This encapsulates a particular strategy for a g1 Collector. |
|
1181 |
// |
|
1182 |
// Start a concurrent mark when our heap size is n bytes |
|
1183 |
// greater than our heap size was at the last concurrent |
|
1184 |
// mark. Where n is a function of the CMSTriggerRatio |
|
1185 |
// and the MinHeapFreeRatio. |
|
1186 |
// |
|
1187 |
// Start a g1 collection pause when we have allocated the |
|
1188 |
// average number of bytes currently being freed in |
|
1189 |
// a collection, but only if it is at least one region |
|
1190 |
// full |
|
1191 |
// |
|
1192 |
// Resize Heap based on desired |
|
1193 |
// allocation space, where desired allocation space is |
|
1194 |
// a function of survival rate and desired future to size. |
|
1195 |
// |
|
1196 |
// Choose collection set by first picking all older regions |
|
1197 |
// which have a survival rate which beats our projected young |
|
1198 |
// survival rate. Then fill out the number of needed regions |
|
1199 |
// with young regions. |
|
1200 |
||
1201 |
class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  // Ranks old regions by how profitable they are to collect.
  CollectionSetChooser* _collectionSetChooser;

  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(HeapRegion* pop_region = NULL);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end(bool popular, bool abandoned);
  bool should_do_collection_pause(size_t word_size);
  virtual void set_single_region_collection_set(HeapRegion* hr);
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};
|
1228 |
||
1229 |
// This should move to some place more general... |
|
1230 |
||
1231 |
// If we have "n" measurements, and we've kept track of their "sum" and the |
|
1232 |
// "sum_of_squares" of the measurements, this returns the variance of the |
|
1233 |
// sequence. |
|
1234 |
// Population variance of n measurements given their "sum" and the
// "sum_of_squares" of the measurements.  This is the expanded form of
// sum((x - mean)^2) / n; due to floating-point rounding it can come out
// marginally negative when the true variance is close to zero.
inline double variance(int n, double sum_of_squares, double sum) {
  const double count = (double)n;
  const double mean  = sum / count;
  return (sum_of_squares - 2.0 * mean * sum + count * mean * mean) / count;
}
|
1239 |
||
1240 |
// Local Variables: *** |
|
1241 |
// c-indentation-style: gnu *** |
|
1242 |
// End: *** |