/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.inline.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/gcUtil.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
}

inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}
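
// For intuition, a minimal sketch of the round trip above (hypothetical
// addr; with _shifter == 0 each bit covers exactly one HeapWord):
//
//   size_t    bit  = heapWordToOffset(addr);  // (addr - _bmStartWord) >> _shifter
//   HeapWord* back = offsetToHeapWord(bit);   // _bmStartWord + (bit << _shifter)
//   // back == addr whenever addr is aligned to (1 << _shifter) heap words.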

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}
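
// Atomically set the bit for addr. The underlying par_at_put() returns
// true only if this call changed the bit, so the return value tells a
// concurrent marking thread whether it won the race to claim addr.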
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}
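// The range operations below pass a size hint to the underlying BitMap:
// BitMap::small_range when the range is expected to span only a few bits,
// and BitMap::large_range when it is known to be large (greater than
// 32 bytes), so the bitmap can choose between a per-bit loop and
// word-at-a-time updates.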
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive), return a memory region
// corresponding to the first maximal contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive), return a memory region
// corresponding to the first maximal contiguous marked ("1") region
// that lies strictly below end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress(start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}
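
// A hedged usage sketch (not part of this file): because an empty region
// is returned once no marked bits remain, the call composes into a loop
// that drains all marked regions in address order.
//
//   for (MemRegion mr = bm->getAndClearMarkedRegion(bm->startWord());
//        !mr.is_empty();
//        mr = bm->getAndClearMarkedRegion(mr.end())) {
//     process(mr);  // hypothetical consumer
//   }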

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// at or after addr (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(heapWordToOffset(start_addr),
                                              heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}

// Return the HeapWord address corresponding to the next "0" bit
// at or after addr (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(heapWordToOffset(start_addr),
                                               heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}
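// Apply cl to each set bit whose address lies in [left, right), after
// clipping the interval to the portion of the heap this map covers.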
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left  = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}
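// An object in the CMS generation is dead, for class-unloading purposes,
// if the completed marking pass left its bit clear; the conjunction below
// also guards that the query is only answered during the Sweeping state.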
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return should_unload_classes() &&
         _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // a scavenge is done or a foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}
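// The stats recorders below fold each new sample into an exponentially
// decaying average via AdaptiveWeightedAverage::exp_avg(), conceptually
//   new_avg = (1 - alpha) * old_avg + alpha * sample
// with the weight supplied as _gc0_alpha / _cms_alpha (see
// gc/shared/gcUtil.hpp for the exact fixed-point form), so recent
// samples dominate the estimate.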
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
                                                   last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
                                                   last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the young gen collection was skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average. It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //     promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
                                                    allocated_bytes, _gc0_alpha);
}
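// _cms_timer runs between concurrent cycles: record_cms_begin() reads it
// as the inter-cycle period and restarts it to time the cycle itself,
// and record_cms_end() reads the cycle duration and starts it again for
// the next period measurement.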
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float)_cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}
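
// The three rates above are (roughly) bytes per second of young-gen
// period; the collector uses them to estimate when the CMS generation
// will fill up and hence when to start the next concurrent cycle.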

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}
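// Apply cl to every object allocated in this generation since the last
// save_marks() call, then advance the saved mark so the objects just
// scanned are not revisited; the generation is published to the closure
// for the duration of the walk.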
template <typename OopClosureType>
void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate(OopClosureType* cl) {
  cl->set_generation(this);
  cmsSpace()->oop_since_save_marks_iterate(cl);
  cl->reset_generation();
  save_marks();
}
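// The do_yield_check() variants below implement the CMS yield protocol:
// a concurrent worker polls for a pending yield request and, unless a
// foreground (stop-the-world) collection is already active, temporarily
// parks itself in do_yield_work() so the requester can make progress.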
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void ParMarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive()) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void ParPushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// A return value of true indicates that the ongoing preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered with the cheap _yield test first: during
  // the remark phase _yield is false, so the check short-circuits.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}

inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), align_up(mr.end(),
                CardTable::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), align_up(mr.end(),
                CardTable::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP