/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1DefaultPolicy.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

G1DefaultPolicy::G1DefaultPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  _ihop_control(create_ihop_control(&_predictor)),
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
  _young_list_fixed_length(0),
  _short_lived_surv_rate_group(new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary)),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _rs_lengths_prediction(0),
  _bytes_allocated_in_old_since_last_gc(0),
  _initial_mark_to_mixed(),
  _collection_set(NULL),
  _g1(NULL),
  _phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true) { }

G1DefaultPolicy::~G1DefaultPolicy() {
  delete _ihop_control;
}

G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }

void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1 = g1h;
  _collection_set = collection_set;

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  if (!adaptive_young_list_length()) {
    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
  }
  _young_gen_sizer.adjust_max_new_size(_g1->max_regions());

  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  _collection_set->start_incremental_building();
}

void G1DefaultPolicy::note_gc_start() {
  phase_times()->note_gc_start();
}

bool G1DefaultPolicy::predict_will_fit(uint young_length,
                                       double base_time_ms,
                                       uint base_free_regions,
                                       double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
                                                                collector_state()->during_concurrent_mark());
  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
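  // Worked example (illustrative numbers only, assuming the default values
  // G1ConfidencePercent = 50 and TargetPLABWastePct = 10):
  //   safety_factor = (100.0 / 50) * (100 + 10) / 100.0 = 2.2
  // i.e. we demand 2.2x the predicted live bytes to be free before we
  // consider the copy to fit.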

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}

void G1DefaultPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);
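  // For example (assuming the default G1ReservePercent of 10, i.e. a reserve
  // factor of 0.1): 255 regions yield 25.5, which ceil() rounds up to a
  // 26-region reserve.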

  _young_gen_sizer.heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1DefaultPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_analytics->num_alloc_rate_ms() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
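      // Illustrative example (made-up numbers): at a predicted allocation
      // rate of 0.02 regions/ms, with the MMU tracker allowing the next GC
      // no sooner than 500 ms from now, we want at least
      // ceil(0.02 * 500) = 10 young regions to absorb allocation until then.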
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
}

uint G1DefaultPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer.max_desired_young_length();
}

uint G1DefaultPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}

uint G1DefaultPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1DefaultPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1DefaultPolicy::YoungTargetLengths G1DefaultPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  const uint base_min_length = _g1->young_list()->survivor_length();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
        calculate_young_list_target_length(rs_lengths,
                                           base_min_length,
                                           desired_min_length,
                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > base_min_length,
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                    uint base_min_length,
                                                    uint desired_min_length,
                                                    uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = _analytics->predict_pending_cards();
  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
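      // Illustrative trace (made-up numbers): starting from min_young_length
      // = 4 (known to fit) and max_young_length = 32 (known not to fit), the
      // probes could go 18 (fits, becomes the new min), 25 (does not, new
      // max), 21 (does not), 19 (does not); diff then reaches 0 and
      // min_young_length = 18 is the answer.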
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1DefaultPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _g1->young_list()->first_survivor_region();
       r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1DefaultPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
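    // Integer arithmetic: e.g. a sampled rs_lengths of 2000 entries becomes
    // a padded prediction of 2000 * 1100 / 1000 = 2200.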
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1DefaultPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

#ifndef PRODUCT
bool G1DefaultPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool G1DefaultPolicy::verify_young_ages(HeapRegion* head, SurvRateGroup* surv_rate_group) {
  guarantee(surv_rate_group != NULL, "pre-condition");

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_error(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1DefaultPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1DefaultPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1DefaultPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set->reset_bytes_used_before();
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(verify_young_ages(), "region age verification");
}

void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1DefaultPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1DefaultPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

double G1DefaultPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1DefaultPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1DefaultPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1DefaultPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {
  return _collection_set->cset_chooser();
}

bool G1DefaultPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
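  // Illustrative example (made-up numbers): on an 8 GB heap with the static
  // IHOP control and the default InitiatingHeapOccupancyPercent of 45, the
  // threshold is roughly 3.6 GB; once old-gen occupancy plus the pending
  // allocation crosses it, we request a concurrent cycle below.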

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
  }

  return result;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1DefaultPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
  double end_time_sec = os::elapsedTime();

  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

  NOT_PRODUCT(_short_lived_surv_rate_group->print());

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
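    // Illustrative example (made-up numbers): 50 eden regions allocated over
    // 2000 ms of mutator time gives an allocation rate sample of
    // 0.025 regions/ms.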
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
      _analytics->report_cost_per_card_ms(cost_per_card_ms);
    }
    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
    if (_max_rs_lengths > recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
    }
    _analytics->report_rs_length_diff((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
    }

    if (_collection_set->young_region_length() > 0) {
      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                        _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                            _collection_set->old_region_length());
    }

    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

    _analytics->report_pending_cards((double) _pending_cards);
    _analytics->report_rs_lengths((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and thus limit the young gen every time we get to
  // the predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
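  // For example (made-up numbers): a 200 ms max GC time with
  // G1RSetUpdatingPauseTimePercent = 10 gives an Update RS goal of 20 ms.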

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
                                update_rs_time_goal_ms, scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  _g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                      phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                      update_rs_time_goal_ms);

  cset_chooser()->verify();
}

G1IHOPControl* G1DefaultPolicy::create_ihop_control(const G1Predictions* predictor) {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1DefaultPolicy::update_ihop_prediction(double mutator_time_s,
                                             size_t mutator_alloc_bytes,
                                             size_t young_gen_size) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // all of them. In many applications there are only a few if any young gcs during
  // marking, which makes any prediction useless. This increases the accuracy of the
  // prediction.
  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1DefaultPolicy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1DefaultPolicy::print_phases() {
  phase_times()->print();
}

double G1DefaultPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = _predictor.get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1DefaultPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1DefaultPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                     size_t scanned_cards) const {
  return
    _analytics->predict_rs_update_time_ms(pending_cards) +
    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
    _analytics->predict_constant_other_time_ms();
}

double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1DefaultPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
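    // Illustrative example (made-up numbers): a young region with 1 MB used
    // and a predicted survival rate of 0.30 contributes an estimated 300 KB
    // to the copy cost.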
  }
  return bytes_to_copy;
}

double G1DefaultPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                       bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1DefaultPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1DefaultPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1DefaultPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1DefaultPolicy::adaptive_young_list_length() const {
  return _young_gen_sizer.adaptive_young_list_length();
}

void G1DefaultPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
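    // For example (made-up numbers): with GCLockerEdenExpansionPercent set
    // to 5 and a target young length of 100 regions, the GC locker may
    // expand eden by up to 5 extra regions.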
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1DefaultPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
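  // For example (assuming the default SurvivorRatio of 8): a target young
  // length of 60 regions gives 60 / 8 = 7.5, so ceil() allows up to 8
  // survivor regions.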

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
      HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
}

bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1DefaultPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1DefaultPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not already
  // be set. We will set it here if we have to. However, it should be cleared
  // by the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
      // Initiate a user requested initial mark. An initial mark must be a
      // young-only GC, so the collector state must be updated to reflect this.
      collector_state()->set_gcs_are_young(true);
      collector_state()->set_last_young_gc(false);

      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1DefaultPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

void G1DefaultPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
  assert(!collector_state()->full_collection(), "must be");
  if (collector_state()->during_initial_mark_pause()) {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->last_young_gc()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(collector_state()->last_gc_was_young(), "must be");
    return LastYoungGC;
  } else if (!collector_state()->last_gc_was_young()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return MixedGC;
  } else {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return YoungOnlyGC;
  }
}

void G1DefaultPolicy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      _initial_mark_to_mixed.record_initial_mark_end(end);
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1DefaultPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                              const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
  return true;
}

uint G1DefaultPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) cset_chooser()->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
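  // For example (assuming the default G1MixedGCCountTarget of 8): 25
  // candidate regions give 25 / 8 = 3 with a remainder, bumped to 4 below,
  // so each mixed GC takes at least 4 old regions.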
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1DefaultPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms) {
  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
  _collection_set->finalize_old_part(time_remaining_ms);
}