/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/promotionInfo.inline.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/owstTaskTerminator.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is the usual CGC_lock monitor. On top of this
// low-level monitor (hereafter "CGC lock") we build a higher-level
// "token" (hereafter "CMS token").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLocker _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
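
// Illustrative sketch (editorial addition, not part of the collector):
// the typical shape of a phase that must exclude the VM thread. The
// bitMapLock() accessor here is just a stand-in for whichever mutexes
// the real phase needs.
//
//   void example_cms_phase() {
//     // CMS thread: take the CMS token, then the needed lock(s).
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... work that the VM thread must not interleave with ...
//   }  // locks released in reverse order, then the token is released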


//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs,
     size_t initial_byte_size,
     size_t min_byte_size,
     size_t max_byte_size,
     CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");

  initialize_performance_counters(min_byte_size, max_byte_size);
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
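
// Worked example (editorial): with the common defaults MinHeapFreeRatio = 40
// and CMSTriggerRatio = 80, and CMSInitiatingOccupancyFraction left at -1,
// the else branch above gives
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
// i.e. a concurrent cycle is considered once the generation is 92% full.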

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(&_span_based_discoverer,
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                      // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(),    // discovery is not atomic (CMS discovers concurrently)
                             &_is_alive_closure,                     // closure for liveness info
                             false);                                 // disable adjusting number of processing threads
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);
  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  return CMSHeap::heap()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
                                                                    size_t max_old_size) {

  const char* gen_name = "old";
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
                                         min_old_size, max_old_size, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}
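
// Editorial note: alpha here is the weight of an exponentially weighted
// average of the form
//   avg' = ((100 - alpha) * avg + alpha * sample) / 100
// so the bootstrap value of 100 makes the first recorded sample replace
// the average outright, after which alpha decays toward _saved_alpha.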

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  CMSHeap* heap = CMSHeap::heap();
  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
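
// Worked example (editorial, illustrative numbers): if cms_free is 300 MB,
// the padded average promotion is 50 MB, and CMSIncrementalSafetyFactor
// is 10, then
//   adjusted_free = (300 - 50) MB * (100 - 10) / 100 = 225 MB
// and with a measured cms_consumption_rate of 44 MB/s the estimate is
//   225 / (44 + 1) = 5.0 seconds until the generation fills up.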

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return work - deadline;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
  CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS* ct):
  _overflow_list(NULL),
  _conc_workers(NULL),     // may be set later
  _completed_initialization(false),
  _collection_count_start(0),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _verifying(false),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false),
  _cmsGen(cmsGen),
  // Adjust span to cover old (cms) gen
  _span(cmsGen->reserved()),
  _ct(ct),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _restart_addr(NULL),
  _ser_pmc_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _span_based_discoverer(_span),
  _ref_processor(NULL),    // will be set later
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _modUnionClosurePar(&_modUnionTable),
  _between_prologue_and_epilogue(false),
  _abort_preclean(false),
  _start_sampling(false),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_never)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_index(0),        // -- ditto --
  _eden_chunk_capacity(0),     // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_plab_array(NULL)   // -- ditto --
{
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      log_warning(gc)("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        log_warning(gc)("task_queues allocation failure.");
        return;
      }
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          log_warning(gc)("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  CMSHeap* heap = CMSHeap::heap();
  _young_gen = heap->young_gen();
  if (heap->supports_inline_contig_alloc()) {
    _top_addr = heap->top_addr();
    _end_addr = heap->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
  _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}

void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::used_stable() const {
  return cmsSpace()->used_stable();
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res ? "" : " not", available, res ? ">=" : "<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  Log(gc, promotion) log;
  if (log.is_trace()) {
    LogStream ls(log.trace());
    cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    Log(gc) log;
    if (log.is_trace()) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      log.trace("From compute_new_size: ");
      log.trace("  Free fraction %f", free_percentage);
      log.trace("  Desired free fraction %f", desired_free_percentage);
      log.trace("  Maximum free fraction %f", maximum_free_percentage);
      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      CMSHeap* heap = CMSHeap::heap();
      size_t young_size = heap->young_gen()->capacity();
      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
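
// Worked example (editorial, illustrative numbers): with used() = 600 MB
// and MinHeapFreeRatio = 40, desired_capacity = 600 / (1 - 0.40) = 1000 MB.
// If capacity() is currently 800 MB (free fraction 0.25 < 0.40), the
// generation expands by MAX2(1000 - 800 MB, MinHeapDeltaBytes) = 200 MB;
// had desired_capacity come out below capacity(), it would shrink instead,
// but only when the delta is at least MinHeapDeltaBytes.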

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLocker y(_markBitMap.lock(),
                  Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now? JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);            // object is live
    _markBitMap.mark(start + 1);        // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                        // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
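
// Editorial sketch: for a directly allocated block of `size` words at
// `start`, the code above leaves three bits set in the mark bit map:
//
//   start            -> 1   (object is live)
//   start + 1        -> 1   ("P-bit": object may still be uninitialized)
//   start + size - 1 -> 1   (marks the last word, i.e. the block's extent)
//
// so marking, precleaning and sweeping can step over the whole block even
// while its klass word is still NULL.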

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     align_up(start + obj_size,
                              CardTable::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (CMSHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                   mut / parnew
// FREE --------------------> TRANSIENT --------------------> OBJECT --|
//   ^                                                                 |
//   |--------------------------- DEAD <------------------------------|
//              sweep                        mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.

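// Editorial sketch (not collector code): the reader loop implied by the
// note above, where block_size_if_parsable() is a hypothetical helper
// standing in for the FREE/OBJECT/TRANSIENT tests listed earlier.
//
//   size_t sz;
//   while ((sz = block_size_if_parsable(addr)) == 0) {
//     // TRANSIENT: klass word still NULL; either come back later or,
//     // for direct allocations, fall back on the P-bit/end-of-block marks.
//   }
//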
// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markWord m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (CMSHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark_raw(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}
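
// Editorial summary of the publication protocol used above: every field
// except the klass word (mark word, klass gap, object body) is written
// first, with OrderAccess::storestore() fences separating the write sets;
// the klass word is installed last. A concurrent reader that observes a
// non-NULL klass therefore sees a fully copied object; until then the
// block remains in the TRANSIENT state described earlier.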

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate(dummy_cl);

  // Because card-scanning has been completed, subsequent phases
  // (e.g., reference processing) will not need to recognize which
  // objects have been promoted during this GC. So, we can now disable
  // promotion tracking.
  ps->promo.stopTrackingPromotions();
}

bool ConcurrentMarkSweepGeneration::should_collect(bool full,
                                                   size_t size,
                                                   bool tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
    log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                  _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the old gen wants a
  // collection cycle started. Each may use an appropriate criterion
  // for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
1191 if (_cmsGen->should_concurrent_collect()) { |
|
1192 log.print("CMS old gen initiated"); |
|
1193 return true; |
|
1194 } |
|
1195 |
|
1196 // We start a collection if we believe an incremental collection may fail; |
|
1197 // this is not likely to be productive in practice because it's probably too |
|
1198 // late anyway. |
|
1199 CMSHeap* heap = CMSHeap::heap(); |
|
1200 if (heap->incremental_collection_will_fail(true /* consult_young */)) { |
|
1201 log.print("CMSCollector: collect because incremental collection will fail "); |
|
1202 return true; |
|
1203 } |
|
1204 |
|
1205 if (MetaspaceGC::should_concurrent_collect()) { |
|
1206 log.print("CMSCollector: collect for metadata allocation "); |
|
1207 return true; |
|
1208 } |
|
1209 |
|
1210 // CMSTriggerInterval starts a CMS cycle if enough time has passed. |
|
1211 if (CMSTriggerInterval >= 0) { |
|
1212 if (CMSTriggerInterval == 0) { |
|
1213 // Trigger always |
|
1214 return true; |
|
1215 } |
|
1216 |
|
1217 // Check the CMS time since begin (we do not check the stats validity |
|
1218 // as we want to be able to trigger the first CMS cycle as well) |
|
1219 if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) { |
|
1220 if (stats().valid()) { |
|
1221 log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", |
|
1222 stats().cms_time_since_begin()); |
|
1223 } else { |
|
1224 log.print("CMSCollector: collect because of trigger interval (first collection)"); |
|
1225 } |
|
1226 return true; |
|
1227 } |
|
1228 } |
|
1229 |
|
1230 return false; |
|
1231 } |
|

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
}

// We should be conservative in starting a collection cycle. To
// start too eagerly runs the risk of collecting too often in the
// extreme. To collect too rarely falls back on full collections,
// which works, even if not optimum in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly. This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
// We want to start a new collection cycle if any of the following
// conditions hold:
// . our current occupancy exceeds the configured initiating occupancy
//   for this generation, or
// . we recently needed to expand this space and have not, since that
//   expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc... or ...
// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation; see CR 6543076):
//   we may be approaching a point at which allocation requests may fail because
//   we will be out of sufficient free space given allocation rate estimates.]
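// For example (an illustrative reading of the first condition): with
// -XX:CMSInitiatingOccupancyFraction=70, initiating_occupancy() evaluates
// to 0.70, so an old generation that is more than 70% occupied causes
// should_concurrent_collect() below to return true on its first test.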
|
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {

  assert_lock_strong(freelistLock());
  if (occupancy() > initiating_occupancy()) {
    log_trace(gc)(" %s: collect because of occupancy %f / %f ",
                  short_name(), occupancy(), initiating_occupancy());
    return true;
  }
  if (UseCMSInitiatingOccupancyOnly) {
    return false;
  }
  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
    return true;
  }
  return false;
}

void ConcurrentMarkSweepGeneration::collect(bool   full,
                                            bool   clear_all_soft_refs,
                                            size_t size,
                                            bool   tlab)
{
  collector()->collect(full, clear_all_soft_refs, size, tlab);
}

void CMSCollector::collect(bool   full,
                           bool   clear_all_soft_refs,
                           size_t size,
                           bool   tlab)
{
  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
  // assert(!GCLocker.is_active(), "Can't be called otherwise");
  // But I am not placing that assert here to allow future
  // generality in invoking this interface.
  if (GCLocker::is_active()) {
    // A consistency test for GCLocker
    assert(GCLocker::needs_gc(), "Should have been set already");
    // Skip this foreground collection, instead
    // expanding the heap if necessary.
    // Need the free list locks for the call to free() in compute_new_size()
    compute_new_size();
    return;
  }
  acquire_control_and_collect(full, clear_all_soft_refs);
}

void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
  CMSHeap* heap = CMSHeap::heap();
  unsigned int gc_count = heap->total_full_collections();
  if (gc_count == full_gc_count) {
    MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
    _full_gc_requested = true;
    _full_gc_cause = cause;
    CGC_lock->notify();   // nudge CMS thread
  } else {
    assert(gc_count > full_gc_count, "Error: causal loop");
  }
}
|

bool CMSCollector::is_external_interruption() {
  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void CMSCollector::report_concurrent_mode_interruption() {
  if (is_external_interruption()) {
    log_debug(gc)("Concurrent mode interrupted");
  } else {
    log_debug(gc)("Concurrent mode failure");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }
}
|

// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
// When a background collection is active,
// the foreground collector may need to take over (preempt) and
// synchronously complete an ongoing collection. Depending on the
// frequency of the background collections and the heap usage
// of the application, this preemption can be seldom or frequent.
// There are only certain points in the background collection at which
// the "collection-baton" can be passed to the foreground collector.
//
// The foreground collector will wait for the baton before
// starting any part of the collection. The foreground collector
// will only wait at one location.
//
// The background collector will yield the baton before starting a new
// phase of the collection (e.g., before initial marking, marking from roots,
// precleaning, final re-mark, sweep etc.) This is normally done at the head
// of the loop which switches the phases. The background collector does some
// of the phases (initial mark, final re-mark) with the world stopped.
// Because of locking involved in stopping the world,
// the foreground collector should not block waiting for the background
// collector when it is doing a stop-the-world phase. The background
// collector will yield the baton at an additional point just before
// it enters a stop-the-world phase. Once the world is stopped, the
// background collector checks the phase of the collection. If the
// phase has not changed, it proceeds with the collection. If the
// phase has changed, it skips that phase of the collection. See
// the comments on the use of the Heap_lock in collect_in_background().
//
// Variables used in baton passing.
//   _foregroundGCIsActive - Set to true by the foreground collector when
//     it wants the baton. The foreground clears it when it has finished
//     the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//     when it is running. The foreground collector waits while
//     _foregroundGCShouldWait is true.
//   CGC_lock - monitor used to protect access to the above variables
//     and to notify the foreground and background collectors.
//   _collectorState - current state of the CMS collection.
//
// The foreground collector
//   acquires the CGC_lock
//   sets _foregroundGCIsActive
//   waits on the CGC_lock for _foregroundGCShouldWait to be false
//     (various locks acquired in preparation for the collection
//     are released so as not to block the background collector
//     that is in the midst of a collection)
//   proceeds with the collection
//   clears _foregroundGCIsActive
//   returns
//
// The background collector in a loop iterating on the phases of the
// collection
//   acquires the CGC_lock
//   sets _foregroundGCShouldWait
//   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
//     and exits the loop.
//   otherwise
//     proceeds with that phase of the collection
//     if the phase is a stop-the-world phase,
//       yields the baton once more just before enqueueing
//       the stop-world CMS operation (executed by the VM thread).
//   returns after all phases of the collection are done
//
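// At its core, the hand-off described above is a condition-variable
// protocol. The following stand-alone sketch is illustrative only: it
// models the protocol with std::mutex/std::condition_variable rather
// than HotSpot's Monitor, and every name in it is invented for
// exposition.
//
//   #include <condition_variable>
//   #include <mutex>
//
//   std::mutex              cgc_lock;
//   std::condition_variable cgc_cv;
//   bool foreground_is_active   = false;  // foreground wants/has the baton
//   bool foreground_should_wait = false;  // background is inside a phase
//
//   void foreground_collect() {
//     std::unique_lock<std::mutex> l(cgc_lock);
//     foreground_is_active = true;
//     cgc_cv.notify_all();  // wake a background thread blocked in a phase
//     cgc_cv.wait(l, [] { return !foreground_should_wait; });
//     // ... do the synchronous (stop-the-world) collection ...
//     foreground_is_active = false;
//     cgc_cv.notify_all();  // let a waiting background thread exit its cycle
//   }
//
//   // Called by the background thread at the head of each phase; returns
//   // true if the baton was passed and the cycle should be abandoned.
//   bool background_yield_point() {
//     std::unique_lock<std::mutex> l(cgc_lock);
//     foreground_should_wait = true;
//     if (foreground_is_active) {
//       foreground_should_wait = false;  // pass the baton
//       cgc_cv.notify_all();
//       cgc_cv.wait(l, [] { return !foreground_is_active; });
//       return true;
//     }
//     return false;  // proceed with this phase
//   }
//
//   // At the end of its cycle the background thread clears
//   // foreground_should_wait and notifies cgc_cv, mirroring the code at
//   // the end of collect_in_background() below.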
|

void CMSCollector::acquire_control_and_collect(bool full,
                                               bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(!Thread::current()->is_ConcurrentGC_thread(),
         "shouldn't try to acquire control from self!");

  // Start the protocol for acquiring control of the
  // collection from the background collector (aka CMS thread).
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");
  // Remember the possibly interrupted state of an ongoing
  // concurrent collection
  CollectorState first_state = _collectorState;

  // Signal to a possibly ongoing concurrent collection that
  // we want to do a foreground collection.
  _foregroundGCIsActive = true;
|

  // Release locks and wait for a notify from the background collector.
  // Releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
  assert_lock_strong(bitMapLock());
  // We need to lock the free list lock for the space that we are
  // currently collecting.
  assert(haveFreelistLocks(), "Must be holding free list locks");
  bitMapLock()->unlock();
  releaseFreelistLocks();
|
  {
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    if (_foregroundGCShouldWait) {
      // We are going to be waiting for action from the CMS thread;
      // it had better not be gone (for instance at shutdown)!
      assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
             "CMS thread must be running");
      // Wait here until the background collector gives us the go-ahead
      ConcurrentMarkSweepThread::clear_CMS_flag(
        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
      // Get a possibly blocked CMS thread going:
      //   Note that we set _foregroundGCIsActive true above,
      //   without protection of the CGC_lock.
      CGC_lock->notify();
      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
             "Possible deadlock");
      while (_foregroundGCShouldWait) {
        // wait for notification
        CGC_lock->wait_without_safepoint_check();
        // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
      }
      ConcurrentMarkSweepThread::set_CMS_flag(
        ConcurrentMarkSweepThread::CMS_vm_has_token);
    }
  }
|
  // The CMS_token is already held. Get back the other locks.
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");
  getFreelistLocks();
  bitMapLock()->lock_without_safepoint_check();
  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
                       p2i(Thread::current()), first_state);
  log_debug(gc, state)(" gets control with state %d", _collectorState);

  // Inform cms gen if this was due to partial collection failing.
  // The CMS gen may use this fact to determine its expansion policy.
  CMSHeap* heap = CMSHeap::heap();
  if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
    assert(!_cmsGen->incremental_collection_failed(),
           "Should have been noticed, reacted to and cleared");
    _cmsGen->set_incremental_collection_failed();
  }

  if (first_state > Idling) {
    report_concurrent_mode_interruption();
  }

  set_did_compact(true);

  // If the collection is being acquired from the background
  // collector, there may be references on the discovered
  // references lists.  Abandon those references, since some
  // of them may have become unreachable after concurrent
  // discovery; the STW compacting collector will redo discovery
  // more precisely, without being subject to floating garbage.
  // Leaving otherwise unreachable references in the discovered
  // lists would require special handling.
  ref_processor()->disable_discovery();
  ref_processor()->abandon_partial_discovery();
  ref_processor()->verify_no_references_recorded();

  if (first_state > Idling) {
    save_heap_summary();
  }

  do_compaction_work(clear_all_soft_refs);

  // Has the GC time limit been exceeded?
  size_t max_eden_size = _young_gen->max_eden_size();
  GCCause::Cause gc_cause = heap->gc_cause();
  size_policy()->check_gc_overhead_limit(_young_gen->eden()->used(),
                                         _cmsGen->max_capacity(),
                                         max_eden_size,
                                         full,
                                         gc_cause,
                                         heap->soft_ref_policy());

  // Reset the expansion cause, now that we just completed
  // a collection cycle.
  clear_expansion_cause();
  _foregroundGCIsActive = false;
  return;
}
|

// Resize the tenured generation
// after obtaining the free list locks for the
// two generations.
void CMSCollector::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);
  FreelistLocker z(this);
  MetaspaceGC::compute_new_size();
  _cmsGen->compute_new_size_free_list();
  // recalculate CMS used space after CMS collection
  _cmsGen->cmsSpace()->recalculate_used_stable();
}
|

// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  CMSHeap* heap = CMSHeap::heap();

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());

  heap->pre_full_gc_dump(gc_timer);

  GCTraceTime(Trace, gc, phases) t("CMS:MSC");

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(CMSHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily, clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  // Temporarily make reference _processing_ single threaded (non-MT).
  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  // Temporarily make refs discovery atomic
  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  // Temporarily make reference _discovery_ single threaded (non-MT)
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
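  // Each rp_mut_* object above is a save/restore guard: its constructor
  // records the reference processor's current setting and installs the
  // temporary one, and its destructor reinstates the saved value when this
  // scope exits. An illustrative stand-alone sketch of the pattern (the
  // template and its names are invented here, not HotSpot code):
  //
  //   template <typename T>
  //   class ScopedSetting {
  //     T* _loc;    // location being temporarily overridden
  //     T  _saved;  // original value, restored on scope exit
  //    public:
  //     ScopedSetting(T* loc, T tmp) : _loc(loc), _saved(*loc) { *loc = tmp; }
  //     ~ScopedSetting() { *_loc = _saved; }
  //   };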
|

  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over a collection from the asynchronous
  // (background) collector, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
         "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();
  assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
         "mod union for klasses should be clear if the baton was not passed");
  _ct->cld_rem_set()->clear_mod_union();
|

  // We must adjust the allocation statistics being maintained
  // in the free list space. We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
  if (_inter_sweep_timer.is_active()) {
    _inter_sweep_timer.stop();
    // Note that we do not use this sample to update the _inter_sweep_estimate.
    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                            _inter_sweep_estimate.padded_average(),
                                            _intra_sweep_estimate.padded_average());
  }

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
#ifdef ASSERT
  CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
  size_t free_size = cms_space->free();
  assert(free_size ==
         pointer_delta(cms_space->end(), cms_space->compaction_top())
         * HeapWordSize,
         "All the free space should be compacted into one chunk at top");
  assert(cms_space->dictionary()->total_chunk_size(
           debug_only(cms_space->freelistLock())) == 0 ||
         cms_space->totalSizeInIndexedFreeLists() == 0,
         "All the free space should be in a single chunk");
  size_t num = cms_space->totalCount();
  assert((free_size == 0 && num == 0) ||
         (free_size > 0  && (num == 1 || num == 2)),
         "There should be at most 2 free chunks after compaction");
#endif // ASSERT
  _collectorState = Resetting;
  assert(_restart_addr == NULL,
         "Should have been NULL'd before baton was passed");
  reset_stw();
  _cmsGen->reset_after_compaction();
  _concurrent_cycles_since_last_unload = 0;

  // Clear any data recorded in the PLAB chunk arrays.
  if (_survivor_plab_array != NULL) {
    reset_survivor_plab_arrays();
  }

  // Adjust the per-size allocation stats for the next epoch.
  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
  // Restart the "inter sweep timer" for the next epoch.
  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  // No longer a need to do a concurrent collection for Metaspace.
  MetaspaceGC::set_should_concurrent_collect(false);

  heap->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  // For a mark-sweep-compact, compute_new_size() will be called
  // in the heap's do_collection() method.
}
|

void CMSCollector::print_eden_and_survivor_chunk_arrays() {
  Log(gc, heap) log;
  if (!log.is_trace()) {
    return;
  }

  ContiguousSpace* eden_space = _young_gen->eden();
  ContiguousSpace* from_space = _young_gen->from();
  ContiguousSpace* to_space   = _young_gen->to();
  // Eden
  if (_eden_chunk_array != NULL) {
    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
              p2i(eden_space->bottom()), p2i(eden_space->top()),
              p2i(eden_space->end()), eden_space->capacity());
    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
              _eden_chunk_index, _eden_chunk_capacity);
    for (size_t i = 0; i < _eden_chunk_index; i++) {
      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
    }
  }
  // Survivor
  if (_survivor_chunk_array != NULL) {
    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
              p2i(from_space->bottom()), p2i(from_space->top()),
              p2i(from_space->end()), from_space->capacity());
    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
              _survivor_chunk_index, _survivor_chunk_capacity);
    for (size_t i = 0; i < _survivor_chunk_index; i++) {
      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
    }
  }
}
|

void CMSCollector::getFreelistLocks() const {
  // Get locks for all free lists in all generations that this
  // collector is responsible for
  _cmsGen->freelistLock()->lock_without_safepoint_check();
}

void CMSCollector::releaseFreelistLocks() const {
  // Release locks for all free lists in all generations that this
  // collector is responsible for
  _cmsGen->freelistLock()->unlock();
}

bool CMSCollector::haveFreelistLocks() const {
  // Check locks for all free lists in all generations that this
  // collector is responsible for
  assert_lock_strong(_cmsGen->freelistLock());
  // The check above is compiled out in product builds, so this predicate
  // is debug-only and must never be reached in a product VM.
  PRODUCT_ONLY(ShouldNotReachHere());
  return true;
}
|

// A utility class that is used by the CMS collector to
// temporarily "release" the foreground collector from its
// usual obligation to wait for the background collector to
// complete an ongoing phase before proceeding.
class ReleaseForegroundGC: public StackObj {
 private:
  CMSCollector* _c;
 public:
  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    // allow a potentially blocked foreground collector to proceed
    _c->_foregroundGCShouldWait = false;
    if (_c->_foregroundGCIsActive) {
      CGC_lock->notify();
    }
    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "Possible deadlock");
  }

  ~ReleaseForegroundGC() {
    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    _c->_foregroundGCShouldWait = true;
  }
};
|

void CMSCollector::collect_in_background(GCCause::Cause cause) {
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "A CMS asynchronous collection is only allowed on a CMS thread.");

  CMSHeap* heap = CMSHeap::heap();
  {
    MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    FreelistLocker fll(this);
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    if (_foregroundGCIsActive) {
      // The foreground collector is active. Skip this
      // background collection.
      assert(!_foregroundGCShouldWait, "Should be clear");
      return;
    } else {
      assert(_collectorState == Idling, "Should be idling before start.");
      _collectorState = InitialMarking;
      register_gc_start(cause);
      // Reset the expansion cause, now that we are about to begin
      // a new cycle.
      clear_expansion_cause();

      // Clear the MetaspaceGC flag since a concurrent collection
      // is starting but also clear it after the collection.
      MetaspaceGC::set_should_concurrent_collect(false);
    }
    // Decide if we want to enable class unloading as part of the
    // ensuing concurrent GC cycle.
    update_should_unload_classes();
    _full_gc_requested = false;           // acks all outstanding full gc requests
    _full_gc_cause = GCCause::_no_gc;
    // Signal that we are about to start a collection
    heap->increment_total_full_collections();  // ... starting a collection cycle
    _collection_count_start = heap->total_full_collections();
  }
|

  size_t prev_used = _cmsGen->used();

  // The change of the collection state is normally done at this level;
  // the exceptions are phases that are executed while the world is
  // stopped.  For those phases the change of state is done while the
  // world is stopped.  For baton passing purposes this allows the
  // background collector to finish the phase and change state atomically.
  // The foreground collector cannot wait on a phase that is done
  // while the world is stopped because the foreground collector already
  // has the world stopped and would deadlock.
  while (_collectorState != Idling) {
    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
                         p2i(Thread::current()), _collectorState);
    // The foreground collector
    //   holds the Heap_lock throughout its collection.
    //   holds the CMS token (but not the lock)
    //     except while it is waiting for the background collector to yield.
    //
    // The foreground collector should be blocked (not for long)
    //   if the background collector is about to start a phase
    //   executed with world stopped.  If the background
    //   collector has already started such a phase, the
    //   foreground collector is blocked waiting for the
    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
    //   are executed in the VM thread.
    //
    // The locking order is
    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
    //   Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
    //   CMS token  (claimed in
    //                stop_world_and_do() -->
    //                  safepoint_synchronize() -->
    //                    CMSThread::synchronize())
|

    {
      // Check if the FG collector wants us to yield.
      CMSTokenSync x(true); // is cms thread
      if (waitForForegroundGC()) {
        // We yielded to a foreground GC, nothing more to be
        // done this round.
        assert(_foregroundGCShouldWait == false, "We set it to false in "
               "waitForForegroundGC()");
        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
                             p2i(Thread::current()), _collectorState);
        return;
      } else {
        // The background collector can run but check to see if the
        // foreground collector has done a collection while the
        // background collector was waiting to get the CGC_lock
        // above.  If yes, break so that _foregroundGCShouldWait
        // is cleared before returning.
        if (_collectorState == Idling) {
          break;
        }
      }
    }
|

    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
           "should be waiting");

    switch (_collectorState) {
      case InitialMarking:
        {
          ReleaseForegroundGC x(this);
          stats().record_cms_begin();
          VM_CMS_Initial_Mark initial_mark_op(this);
          VMThread::execute(&initial_mark_op);
        }
        // The collector state may be any legal state at this point
        // since the background collector may have yielded to the
        // foreground collector.
        break;
      case Marking:
        // initial marking in checkpointRootsInitialWork has been completed
        if (markFromRoots()) { // we were successful
          assert(_collectorState == Precleaning, "Collector state should "
                 "have changed");
        } else {
          assert(_foregroundGCIsActive, "Internal state inconsistency");
        }
        break;
      case Precleaning:
        // marking from roots in markFromRoots has been completed
        preclean();
        assert(_collectorState == AbortablePreclean ||
               _collectorState == FinalMarking,
               "Collector state should have changed");
        break;
      case AbortablePreclean:
        abortable_preclean();
        assert(_collectorState == FinalMarking, "Collector state should "
               "have changed");
        break;
      case FinalMarking:
        {
          ReleaseForegroundGC x(this);

          VM_CMS_Final_Remark final_remark_op(this);
          VMThread::execute(&final_remark_op);
        }
        assert(_foregroundGCShouldWait, "block post-condition");
        break;
|
      case Sweeping:
        // final marking in checkpointRootsFinal has been completed
        sweep();
        assert(_collectorState == Resizing, "Collector state change "
               "to Resizing must be done under the free_list_lock");
        // fall through

      case Resizing: {
        // Sweeping has been completed...
        // At this point the background collection has completed.
        // Don't move the call to compute_new_size() down
        // into code that might be executed if the background
        // collection was preempted.
        {
          ReleaseForegroundGC x(this);   // unblock FG collection
          MutexLocker y(Heap_lock, Mutex::_no_safepoint_check_flag);
          CMSTokenSync z(true);   // not strictly needed.
          if (_collectorState == Resizing) {
            compute_new_size();
            save_heap_summary();
            _collectorState = Resetting;
          } else {
            assert(_collectorState == Idling, "The state should only change"
                   " because the foreground collector has finished the collection");
          }
        }
        break;
      }
|
      case Resetting:
        // CMS heap resizing has been completed
        reset_concurrent();
        assert(_collectorState == Idling, "Collector state should "
               "have changed");

        MetaspaceGC::set_should_concurrent_collect(false);

        stats().record_cms_end();
        // Don't move the concurrent_phases_end() and compute_new_size()
        // calls to here because a preempted background collection
        // has its state set to "Resetting".
        break;
      case Idling:
      default:
        ShouldNotReachHere();
        break;
    }
|
    log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d",
                         p2i(Thread::current()), _collectorState);
    assert(_foregroundGCShouldWait, "block post-condition");
  }

  // Should this be in gc_epilogue?
  heap->counters()->update_counters();

  {
    // Clear _foregroundGCShouldWait and, in the event that the
    // foreground collector is waiting, notify it, before
    // returning.
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    _foregroundGCShouldWait = false;
    if (_foregroundGCIsActive) {
      CGC_lock->notify();
    }
    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "Possible deadlock");
  }
  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
                       p2i(Thread::current()), _collectorState);
  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     prev_used / K, _cmsGen->used() / K, _cmsGen->capacity() / K);
}
|

void CMSCollector::register_gc_start(GCCause::Cause cause) {
  _cms_start_registered = true;
  _gc_timer_cm->register_gc_start();
  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
}

void CMSCollector::register_gc_end() {
  if (_cms_start_registered) {
    report_heap_summary(GCWhen::AfterGC);

    _gc_timer_cm->register_gc_end();
    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
    _cms_start_registered = false;
  }
}

void CMSCollector::save_heap_summary() {
  CMSHeap* heap = CMSHeap::heap();
  _last_heap_summary = heap->create_heap_summary();
  _last_metaspace_summary = heap->create_metaspace_summary();
}

void CMSCollector::report_heap_summary(GCWhen::Type when) {
  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
}

|
bool CMSCollector::waitForForegroundGC() {
  bool res = false;
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  _foregroundGCShouldWait = true;
  if (_foregroundGCIsActive) {
    // The background collector yields to the
    // foreground collector and returns a value
    // indicating that it has yielded.  The foreground
    // collector can proceed.
    res = true;
    _foregroundGCShouldWait = false;
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
    // Get a possibly blocked foreground thread going
    CGC_lock->notify();
    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
                         p2i(Thread::current()), _collectorState);
    while (_foregroundGCIsActive) {
      CGC_lock->wait_without_safepoint_check();
    }
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
  }
  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
                       p2i(Thread::current()), _collectorState);
  return res;
}
|

// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector.  It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector.  In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for.  A similar
// comment applies to the gc_epilogue()s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
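// In effect the protocol is a simple latch, sketched below (illustrative
// only; the names are invented and the real code also claims locks):
//
//   bool between_prologue_and_epilogue = false;
//
//   void prologue() {
//     if (between_prologue_and_epilogue) return;  // a delegated repeat call
//     between_prologue_and_epilogue = true;
//     // ... do the work common to all generations, once per GC ...
//   }
//
//   void epilogue() {
//     if (!between_prologue_and_epilogue) return; // a delegated repeat call
//     // ... undo the prologue's work ...
//     between_prologue_and_epilogue = false;      // ready for the next cycle
//   }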
|
void CMSCollector::gc_prologue(bool full) {
  // Call gc_prologue_work() for the CMSGen
  // we are responsible for.

  // The following locking discipline assumes that we are only called
  // when the world is stopped.
  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");

  // The CMSCollector prologue must call the gc_prologues for the
  // "generations" that it's responsible for.

  assert(   Thread::current()->is_VM_thread()
         || (   CMSScavengeBeforeRemark
             && Thread::current()->is_ConcurrentGC_thread()),
         "Incorrect thread type for prologue execution");

  if (_between_prologue_and_epilogue) {
    // We have already been invoked; this is a gc_prologue delegation
    // from yet another CMS generation that we are responsible for, just
    // ignore it since all relevant work has already been done.
    return;
  }

  // set a bit saying prologue has been called; cleared in epilogue
  _between_prologue_and_epilogue = true;
  // Claim locks for common data structures, then call gc_prologue_work()
  // for each CMSGen.

  getFreelistLocks();   // gets free list locks on constituent spaces
  bitMapLock()->lock_without_safepoint_check();

  // Should call gc_prologue_work() for all cms gens we are responsible for
  bool duringMarking =    _collectorState >= Marking
                       && _collectorState < Sweeping;

  // The young collections clear the modified oops state, which tells if
  // there are any modified oops in the class. The remark phase also needs
  // that information. Tell the young collection to save the union of all
  // modified klasses.
  if (duringMarking) {
    _ct->cld_rem_set()->set_accumulate_modified_oops(true);
  }

  bool registerClosure = duringMarking;

  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);

  if (!full) {
    stats().record_gc0_begin();
  }
}
|

void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {

  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
  _cmsSpace->recalculate_used_stable();

  // We enable promotion tracking so that card-scanning can recognize
  // which objects have been promoted during this GC and skip them.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.startTrackingPromotions();
  }

  // Delegate to CMSCollector, which knows how to coordinate between
  // this and any other CMS generations that it is responsible for
  // collecting.
  collector()->gc_prologue(full);
}
|

// This is a "private" interface for use by this generation's CMSCollector.
// Not to be called directly by any other entity (for instance,
// GenCollectedHeap, which calls the "public" gc_prologue method above).
void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
  bool registerClosure, ModUnionClosure* modUnionClosure) {
  assert(!incremental_collection_failed(), "Shouldn't be set yet");
  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
         "Should be NULL");
  if (registerClosure) {
    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
  }
  cmsSpace()->gc_prologue();
  // Clear stat counters
  NOT_PRODUCT(
    assert(_numObjectsPromoted == 0, "check");
    assert(_numWordsPromoted   == 0, "check");
    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
    _numObjectsAllocated = 0;
    _numWordsAllocated   = 0;
  )
}
|

void CMSCollector::gc_epilogue(bool full) {
  // The following locking discipline assumes that we are only called
  // when the world is stopped.
  assert(SafepointSynchronize::is_at_safepoint(),
         "world is stopped assumption");

  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow the
  // blocks to be parsable. We also check here whether we need to nudge the
  // CMS collector thread to start a new cycle (if it's not already active).
  assert(   Thread::current()->is_VM_thread()
         || (   CMSScavengeBeforeRemark
             && Thread::current()->is_ConcurrentGC_thread()),
         "Incorrect thread type for epilogue execution");

  if (!_between_prologue_and_epilogue) {
    // We have already been invoked; this is a gc_epilogue delegation
    // from yet another CMS generation that we are responsible for, just
    // ignore it since all relevant work has already been done.
    return;
  }
  assert(haveFreelistLocks(), "must have freelist locks");
  assert_lock_strong(bitMapLock());

  _ct->cld_rem_set()->set_accumulate_modified_oops(false);

  _cmsGen->gc_epilogue_work(full);

  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
    // in case sampling was not already enabled, enable it
    _start_sampling = true;
  }
  // reset _eden_chunk_array so sampling starts afresh
  _eden_chunk_index = 0;

  size_t cms_used = _cmsGen->cmsSpace()->used();
  _cmsGen->cmsSpace()->recalculate_used_stable();

  // update performance counters - this uses a special version of
  // update_counters() that allows the utilization to be passed as a
  // parameter, avoiding multiple calls to used().
  //
  _cmsGen->update_counters(cms_used);

  bitMapLock()->unlock();
  releaseFreelistLocks();

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  set_did_compact(false);
  _between_prologue_and_epilogue = false;  // ready for next cycle
}
|

void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
  collector()->gc_epilogue(full);

  // When using ParNew, promotion tracking should have already been
  // disabled. However, the prologue (which enables promotion
  // tracking) and epilogue are called irrespective of the type of
  // GC. So they will also be called before and after Full GCs, during
  // which promotion tracking will not be explicitly disabled. So,
  // it's safer to disable it here too (to be symmetric with
  // enabling it in the prologue).
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.stopTrackingPromotions();
  }
}
|

void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
  assert(!incremental_collection_failed(), "Should have been cleared");
  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
  cmsSpace()->gc_epilogue();
  // Print stat counters
  NOT_PRODUCT(
    assert(_numObjectsAllocated == 0, "check");
    assert(_numWordsAllocated == 0, "check");
    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
    _numObjectsPromoted = 0;
    _numWordsPromoted   = 0;
  )

  // The call down the chain in contiguous_available() needs the freelistLock,
  // so print this out before releasing the freelistLock.
  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
}
|

#ifndef PRODUCT
bool CMSCollector::have_cms_token() {
  Thread* thr = Thread::current();
  if (thr->is_VM_thread()) {
    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
  } else if (thr->is_ConcurrentGC_thread()) {
    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
  } else if (thr->is_GC_task_thread()) {
    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
           ParGCRareEvent_lock->owned_by_self();
  }
  return false;
}

// Check reachability of the given heap address in CMS generation,
// treating all other generations as roots.
bool CMSCollector::is_cms_reachable(HeapWord* addr) {
  // We could "guarantee" below, rather than assert, but I'll
  // leave these as "asserts" so that an adventurous debugger
  // could try this in the product build provided some subset of
  // the conditions were met, provided they were interested in the
  // results and knew that the computation below wouldn't interfere
  // with other concurrent computations mutating the structures
  // being read or written.
  assert(SafepointSynchronize::is_at_safepoint(),
         "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(), "Should hold cms token");
  assert(haveFreelistLocks(), "must hold free list locks");
  assert_lock_strong(bitMapLock());

  // Clear the marking bit map array before starting, but, just
  // for kicks, first report if the given address is already marked
  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
                _markBitMap.isMarked(addr) ? "" : " not");

  if (verify_after_remark()) {
    MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
    bool result = verification_mark_bm()->isMarked(addr);
    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
                  result ? "IS" : "is NOT");
    return result;
  } else {
    tty->print_cr("Could not compute result");
    return false;
  }
}
#endif
|

void
CMSCollector::print_on_error(outputStream* st) {
  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
  if (collector != NULL) {
    CMSBitMap* bitmap = &collector->_markBitMap;
    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
    bitmap->print_on_error(st, " Bits: ");

    st->cr();

    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
    mut_bitmap->print_on_error(st, " Bits: ");
  }
}
|

////////////////////////////////////////////////////////
// CMS Verification Support
////////////////////////////////////////////////////////
// Following the remark phase, the following invariant
// should hold -- each object marked in the verification_mark_bm()
// (i.e. transitively reachable at this safepoint) should also be
// marked in markBitMap(); the CMS marks may additionally cover
// floating garbage. The closure below checks exactly this inclusion
// (see also the comment preceding its use in verify_after_remark_work_1()).

class VerifyMarkedClosure: public BitMapClosure {
  CMSBitMap* _marks;
  bool       _failed;

 public:
  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}

  bool do_bit(size_t offset) {
    HeapWord* addr = _marks->offsetToHeapWord(offset);
    if (!_marks->isMarked(addr)) {
      Log(gc, verify) log;
      ResourceMark rm;
      LogStream ls(log.error());
      oop(addr)->print_on(&ls);
      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
      _failed = true;
    }
    return true;
  }

  bool failed() { return _failed; }
};
|

bool CMSCollector::verify_after_remark() {
  GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
  MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  static bool init = false;

  assert(SafepointSynchronize::is_at_safepoint(),
         "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         "verification data structures");
  assert(_collectorState > Marking && _collectorState <= Sweeping,
         "Else marking info checked here may be obsolete");
  assert(haveFreelistLocks(), "must hold free list locks");
  assert_lock_strong(bitMapLock());


  // Allocate marking bit map if not already allocated
  if (!init) { // first time
    if (!verification_mark_bm()->allocate(_span)) {
      return false;
    }
    init = true;
  }

  assert(verification_mark_stack()->isEmpty(), "Should be empty");

  // Turn off refs discovery -- so we will be tracing through refs.
  // This is as intended, because by this time
  // GC must already have cleared any refs that need to be cleared,
  // and traced those that need to be marked; moreover,
  // the marking done here is not going to interfere in any
  // way with the marking information used by GC.
  NoRefDiscovery no_discovery(ref_processor());

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif

  // Clear any marks from a previous round
  verification_mark_bm()->clear_all();
  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
  verify_work_stacks_empty();

  CMSHeap* heap = CMSHeap::heap();
  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  heap->save_marks();

  if (CMSRemarkVerifyVariant == 1) {
    // In this first variant of verification, we complete
    // all marking, then check if the new marks-vector is
    // a subset of the CMS marks-vector.
    verify_after_remark_work_1();
  } else {
    guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
    // In this second variant of verification, we flag an error
    // (i.e. an object reachable in the new marks-vector not reachable
    // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
    // presumably, a mutation to A failed to be picked up by preclean/remark?
    verify_after_remark_work_2();
  }

  return true;
}
|

void CMSCollector::verify_after_remark_work_1() {
  ResourceMark rm;
  HandleMark  hm;
  CMSHeap* heap = CMSHeap::heap();

  // Get a clear set of claim bits for the roots processing to work with.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Mark from roots one level into CMS
  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

  {
    StrongRootsScope srs(1);

    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
                            NULL);
  }

  // Now mark from the roots
  MarkFromRootsClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), verification_mark_stack(),
    false /* don't yield */, true /* verifying */);
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  if (vcl.failed()) {
    Log(gc, verify) log;
    log.error("Failed marking verification after remark");
    ResourceMark rm;
    LogStream ls(log.error());
    heap->print_on(&ls);
    fatal("CMS: failed marking verification after remark");
  }
}
|

class VerifyCLDOopsCLDClosure : public CLDClosure {
  class VerifyCLDOopsClosure : public OopClosure {
    CMSBitMap* _bitmap;
   public:
    VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  } _oop_closure;
 public:
  VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false);
  }
};
|

void CMSCollector::verify_after_remark_work_2() {
  ResourceMark rm;
  HandleMark  hm;
  CMSHeap* heap = CMSHeap::heap();

  // Get a clear set of claim bits for the roots processing to work with.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Mark from roots one level into CMS
  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                     markBitMap());
  CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);

  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

  {
    StrongRootsScope srs(1);

    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
                            &cld_closure);
  }

  // Now mark from the roots
  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), markBitMap(), verification_mark_stack());
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();

  VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
  ClassLoaderDataGraph::cld_do(&verify_cld_oops);

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  assert(!vcl.failed(), "Else verification above should not have succeeded");
}
|

void ConcurrentMarkSweepGeneration::save_marks() {
  // delegate to CMS space
  cmsSpace()->save_marks();
}

bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  return cmsSpace()->no_allocs_since_save_marks();
}

void
ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::oop_iterate(cl);
  } else {
    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::oop_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::object_iterate(cl);
  } else {
    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::object_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::safe_object_iterate(cl);
  } else {
    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::safe_object_iterate(cl);
  }
}
|

void
ConcurrentMarkSweepGeneration::post_compact() {
}

void
ConcurrentMarkSweepGeneration::prepare_for_verify() {
  // Fix the linear allocation blocks to look like free blocks.

  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->prepare_for_verify();
  } else {
    MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->prepare_for_verify();
  }
}

void
ConcurrentMarkSweepGeneration::verify() {
  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->verify();
  } else {
    MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->verify();
  }
}
|

void CMSCollector::verify() {
  _cmsGen->verify();
}

#ifndef PRODUCT
bool CMSCollector::overflow_list_is_empty() const {
  assert(_num_par_pushes >= 0, "Inconsistency");
  if (_overflow_list == NULL) {
    assert(_num_par_pushes == 0, "Inconsistency");
  }
  return _overflow_list == NULL;
}

// The methods verify_work_stacks_empty() and verify_overflow_empty()
// merely consolidate assertion checks that appear to occur together frequently.
void CMSCollector::verify_work_stacks_empty() const {
  assert(_markStack.isEmpty(), "Marking stack should be empty");
  assert(overflow_list_is_empty(), "Overflow list should be empty");
}

void CMSCollector::verify_overflow_empty() const {
  assert(overflow_list_is_empty(), "Overflow list should be empty");
  assert(no_preserved_marks(), "No preserved marks");
}
#endif // PRODUCT
|
2565 |
|
2566 // Decide if we want to enable class unloading as part of the |
|
2567 // ensuing concurrent GC cycle. We will collect and |
|
2568 // unload classes if it's the case that: |
|
2569 // (a) class unloading is enabled at the command line, and |
|
2570 // (b) old gen is getting really full |
|
2571 // NOTE: Provided there is no change in the state of the heap between |
|
2572 // calls to this method, it should have idempotent results. Moreover, |
|
2573 // its results should be monotonically increasing (i.e. going from 0 to 1, |
|
2574 // but not 1 to 0) between successive calls between which the heap was |
|
2575 // not collected. For the implementation below, it must thus rely on |
|
2576 // the property that concurrent_cycles_since_last_unload() |
|
2577 // will not decrease unless a collection cycle happened and that |
|
2578 // _cmsGen->is_too_full() are |
|
2579 // themselves also monotonic in that sense. See check_monotonicity() |
|
2580 // below. |
|
2581 void CMSCollector::update_should_unload_classes() { |
|
2582 _should_unload_classes = false; |
|
2583 if (CMSClassUnloadingEnabled) { |
|
2584 _should_unload_classes = (concurrent_cycles_since_last_unload() >= |
|
2585 CMSClassUnloadingMaxInterval) |
|
2586 || _cmsGen->is_too_full(); |
|
2587 } |
|
2588 } |
|
2589 |
|
2590 bool ConcurrentMarkSweepGeneration::is_too_full() const { |
|
2591 bool res = should_concurrent_collect(); |
|
2592 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); |
|
2593 return res; |
|
2594 } |
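
// Worked example for is_too_full() (illustrative numbers, not asserted by
// this file): with CMSIsTooFullPercentage == 98, a generation at 98.5%
// occupancy that also satisfies should_concurrent_collect() reports
// is_too_full() == true, since 0.985 > 98/100.0; at 97% occupancy it
// reports false regardless of the other condition.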

void CMSCollector::setup_cms_unloading_and_verification_state() {
  const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                             || VerifyBeforeExit;
  const int rso = GenCollectedHeap::SO_AllCodeCache;

  // We set the proper root for this CMS cycle here.
  if (should_unload_classes()) {       // Should unload classes this cycle
    remove_root_scanning_option(rso);  // Shrink the root set appropriately
    set_verifying(should_verify);      // Set verification state for this cycle
    return;                            // Nothing else needs to be done at this time
  }

  // Not unloading classes this cycle
  assert(!should_unload_classes(), "Inconsistency!");

  // If we are not unloading classes then add SO_AllCodeCache to root
  // scanning options.
  add_root_scanning_option(rso);

  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
    set_verifying(true);
  } else if (verifying() && !should_verify) {
    // We were verifying, but some verification flags got disabled.
    set_verifying(false);
    // Exclude symbols, strings and code cache elements from root scanning to
    // reduce initial mark (IM) and remark (RM) pauses.
    remove_root_scanning_option(rso);
  }
}


#ifndef PRODUCT
HeapWord* CMSCollector::block_start(const void* p) const {
  const HeapWord* addr = (HeapWord*)p;
  if (_span.contains(p)) {
    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
      return _cmsGen->cmsSpace()->block_start(p);
    }
  }
  return NULL;
}
#endif

HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool   tlab,
                                                   bool   parallel) {
  CMSSynchronousYieldRequest yr;
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return have_lock_and_allocate(word_size, tlab);
}

void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
    size_t bytes,
    size_t expand_bytes,
    CMSExpansionCause::Cause cause)
{

  bool success = expand(bytes, expand_bytes);

  // remember why we expanded; this information is used
  // by shouldConcurrentCollect() when making decisions on whether to start
  // a new CMS cycle.
  if (success) {
    set_expansion_cause(cause);
    log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause));
  }
}

HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::naked_sleep(GCExpandToAllocateDelayMillis);
    }
  }
}


bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::naked_sleep(GCExpandToAllocateDelayMillis);
    }
  }
}
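
// Both expand_and_par_lab_allocate() and expand_and_ensure_spooling_space()
// above use the same "try, expand, retry" loop under ParGCRareEvent_lock.
// A minimal standalone sketch of that pattern (hypothetical helper names,
// for illustration only -- not part of this file):
//
//   HeapWord* retry_alloc_with_expansion(size_t sz_bytes) {
//     while (true) {
//       HeapWord* res = try_alloc(sz_bytes);   // may succeed if another
//       if (res != NULL) return res;           // thread expanded meanwhile
//       if (uncommitted_bytes() < sz_bytes) {
//         return NULL;                         // no room left to grow
//       }
//       expand_by(sz_bytes);                   // then loop and retry
//     }
//   }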

void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  // Only shrink if a compaction was done so that all the free space
  // in the generation is in a contiguous block at the end.
  if (did_compact()) {
    CardGeneration::shrink(bytes);
  }
}

void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}

void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  log_trace(gc)("Shrinking of CMS not yet implemented");
  return;
}


// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
// phases.
class CMSPhaseAccounting: public StackObj {
 public:
  CMSPhaseAccounting(CMSCollector *collector,
                     const char *title);
  ~CMSPhaseAccounting();

 private:
  CMSCollector *_collector;
  const char *_title;
  GCTraceConcTime(Info, gc) _trace_time;

 public:
  // Not MT-safe; so do not pass around these StackObj's
  // where they may be accessed by other threads.
  double wallclock_millis() {
    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
  }
};

CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *title) :
  _collector(collector), _title(title), _trace_time(title) {

  _collector->resetYields();
  _collector->resetTimer();
  _collector->startTimer();
  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
}

CMSPhaseAccounting::~CMSPhaseAccounting() {
  _collector->gc_timer_cm()->register_gc_concurrent_end();
  _collector->stopTimer();
  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
}
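
// CMSPhaseAccounting is used as a stack-allocated guard bracketing each
// concurrent phase; for example, markFromRoots() below does, in essence:
//
//   GCTraceCPUTime tcpu;
//   CMSPhaseAccounting pa(this, "Concurrent Mark");
//   bool res = markFromRootsWork();
//
// so the constructor starts the phase timer and the destructor logs the
// accumulated active time and yield count when the phase exits, on any path.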

// CMS work

// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
class CMSParMarkTask : public AbstractGangTask {
 protected:
  CMSCollector*     _collector;
  uint              _n_workers;
  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
      AbstractGangTask(name),
      _collector(collector),
      _n_workers(n_workers) {}
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(OopsInGenClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);
  void work_on_young_gen_roots(OopsInGenClosure* cl);
};

// Parallel initial mark task
class CMSParInitialMarkTask: public CMSParMarkTask {
  StrongRootsScope* _strong_roots_scope;
 public:
  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
      _strong_roots_scope(strong_roots_scope) {}
  void work(uint worker_id);
};

// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch-up phase subsequently.]
void CMSCollector::checkpointRootsInitial() {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());

  save_heap_summary();
  report_heap_summary(GCWhen::BeforeGC);

  ReferenceProcessor* rp = ref_processor();
  assert(_restart_addr == NULL, "Control point invariant");
  {
    // acquire locks for subsequent manipulations
    MutexLocker x(bitMapLock(),
                  Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork();
    // enable ("weak") refs discovery
    rp->enable_discovery();
    _collectorState = Marking;
  }

  _cmsGen->cmsSpace()->recalculate_used_stable();
}

void CMSCollector::checkpointRootsInitialWork() {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // Already have locks.
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

  GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }

  ResourceMark rm;
  HandleMark  hm;

  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  CMSHeap* heap = CMSHeap::heap();

  verify_work_stacks_empty();
  verify_overflow_empty();

  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  heap->save_marks();

  // weak reference processing has not started yet.
  ref_processor()->set_enqueuing_is_done(false);

  // Need to remember all newly created CLDs,
  // so that we can guarantee that the remark finds them.
  ClassLoaderDataGraph::remember_new_clds(true);

  // Whenever a CLD is found, it will be claimed before proceeding to mark
  // the klasses. The claimed marks need to be cleared before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  print_eden_and_survivor_chunk_arrays();

  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    if (CMSParallelInitialMarkEnabled) {
      // The parallel version.
      WorkGang* workers = heap->workers();
      assert(workers != NULL, "Need parallel worker threads.");
      uint n_workers = workers->active_workers();

      StrongRootsScope srs(n_workers);

      CMSParInitialMarkTask tsk(this, &srs, n_workers);
      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
      // If the total workers is greater than 1, then multiple workers
      // may be used at some time and the initialization has been set
      // such that the single threaded path cannot be used.
      if (workers->total_workers() > 1) {
        workers->run_task(&tsk);
      } else {
        tsk.work(0);
      }
    } else {
      // The serial version.
      CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
      heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

      StrongRootsScope srs(1);

      heap->cms_process_roots(&srs,
                              true,   // young gen as roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
                              &notOlder,
                              &cld_closure);
    }
  }

  // Clear mod-union table; it will be dirtied in the prologue of
  // CMS generation for each young generation collection.

  assert(_modUnionTable.isAllClear(),
       "Was cleared in most recent final checkpoint phase"
       " or no bits are set in the gc_prologue before the start of the next "
       "subsequent marking phase.");

  assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  verify_overflow_empty();
}

bool CMSCollector::markFromRoots() {
  // we might be tempted to assert that:
  // assert(!SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a young generation
  // stop-the-world GC happens even as we mark in this generation.
  assert(_collectorState == Marking, "inconsistent state?");
  check_correct_thread_executing();
  verify_overflow_empty();

  // Weak ref discovery note: We may be discovering weak
  // refs in this generation concurrent (but interleaved) with
  // weak ref discovery by the young generation collector.

  CMSTokenSyncWithLocks ts(true, bitMapLock());
  GCTraceCPUTime tcpu;
  CMSPhaseAccounting pa(this, "Concurrent Mark");
  bool res = markFromRootsWork();
  if (res) {
    _collectorState = Precleaning;
  } else {    // We failed and a foreground collection wants to take over
    assert(_foregroundGCIsActive, "internal state inconsistency");
    assert(_restart_addr == NULL,  "foreground will restart from scratch");
    log_debug(gc)("bailing out to foreground collection");
  }
  verify_overflow_empty();
  return res;
}

bool CMSCollector::markFromRootsWork() {
  // iterate over marked bits in bit map, doing a full scan and mark
  // from these roots using the following algorithm:
  // . if oop is to the right of the current scan pointer,
  //   mark corresponding bit (we'll process it later)
  // . else (oop is to left of current scan pointer)
  //   push oop on marking stack
  // . drain the marking stack

  // Note that when we do a marking step we need to hold the
  // bit map lock -- recall that direct allocation (by mutators)
  // and promotion (by the young generation collector) is also
  // marking the bit map. [the so-called allocate live policy.]
  // Because the implementation of bit map marking is not
  // robust wrt simultaneous marking of bits in the same word,
  // we need to make sure that there is no such interference
  // between concurrent such updates.

  // already have locks
  assert_lock_strong(bitMapLock());

  verify_work_stacks_empty();
  verify_overflow_empty();
  bool result = false;
  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
    result = do_marking_mt();
  } else {
    result = do_marking_st();
  }
  return result;
}

// Forward decl
class CMSConcMarkingTask;

class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
  CMSCollector*       _collector;
  CMSConcMarkingTask* _task;
 public:
  virtual void yield();

  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    ParallelTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
  CMSCollector*       _collector;
  CMSConcMarkingTask* _task;
 public:
  virtual void yield();

  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    OWSTTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

class CMSConcMarkingTaskTerminator {
 private:
  ParallelTaskTerminator* _term;
 public:
  CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
    if (UseOWSTTaskTerminator) {
      _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
    } else {
      _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
    }
  }
  ~CMSConcMarkingTaskTerminator() {
    assert(_term != NULL, "Must not be NULL");
    delete _term;
  }

  void set_task(CMSConcMarkingTask* task);
  ParallelTaskTerminator* terminator() const { return _term; }
};

class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  CMSConcMarkingTask* _task;
 public:
  bool should_exit_termination();
  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

// MT Concurrent Marking Task
class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  CMSCollector*             _collector;
  uint                      _n_workers;       // requested/desired # workers
  bool                      _result;
  CompactibleFreeListSpace* _cms_space;
  char                      _pad_front[64];   // padding to ...
  HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
  char                      _pad_back[64];
  HeapWord*                 _restart_addr;

  // Exposed here for yielding support
  Mutex* const _bit_map_lock;

  // The per thread work queues, available here for stealing
  OopTaskQueueSet* _task_queues;

  // Termination (and yielding) support
  CMSConcMarkingTaskTerminator       _term;
  CMSConcMarkingTerminatorTerminator _term_term;

 public:
  CMSConcMarkingTask(CMSCollector* collector,
                     CompactibleFreeListSpace* cms_space,
                     YieldingFlexibleWorkGang* workers,
                     OopTaskQueueSet* task_queues):
    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
    _collector(collector),
    _n_workers(0),
    _result(true),
    _cms_space(cms_space),
    _bit_map_lock(collector->bitMapLock()),
    _task_queues(task_queues),
    _term(_n_workers, task_queues, _collector)
  {
    _requested_size = _n_workers;
    _term.set_task(this);
    _term_term.set_task(this);
    _restart_addr = _global_finger = _cms_space->bottom();
  }


  OopTaskQueueSet* task_queues()  { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  HeapWord* volatile* global_finger_addr() { return &_global_finger; }

  ParallelTaskTerminator* terminator() { return _term.terminator(); }

  virtual void set_for_termination(uint active_workers) {
    terminator()->reset_for_reuse(active_workers);
  }

  void work(uint worker_id);
  bool should_yield() {
    return    ConcurrentMarkSweepThread::should_yield()
           && !_collector->foregroundGCIsActive();
  }

  virtual void coordinator_yield();  // stuff done by coordinator
  bool result() { return _result; }

  void reset(HeapWord* ra) {
    assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
    _restart_addr = _global_finger = ra;
    _term.terminator()->reset_for_reuse();
  }

  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                           OopTaskQueue* work_q);

 private:
  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
  void do_work_steal(int i);
  void bump_global_finger(HeapWord* f);
};

bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
  assert(_task != NULL, "Error");
  return _task->yielding();
  // Note that we do not need the disjunct || _task->should_yield() above
  // because we want terminating threads to yield only if the task
  // is already in the midst of yielding, which happens only after at least one
  // thread has yielded.
}

void CMSConcMarkingParallelTerminator::yield() {
  if (_task->should_yield()) {
    _task->yield();
  } else {
    ParallelTaskTerminator::yield();
  }
}

void CMSConcMarkingOWSTTerminator::yield() {
  if (_task->should_yield()) {
    _task->yield();
  } else {
    OWSTTaskTerminator::yield();
  }
}

void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
  if (UseOWSTTaskTerminator) {
    ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
  } else {
    ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
  }
}

////////////////////////////////////////////////////////////////
// Concurrent Marking Algorithm Sketch
////////////////////////////////////////////////////////////////
// Until all tasks exhausted (both spaces):
// -- claim next available chunk
// -- bump global finger via CAS
// -- find first object that starts in this chunk
//    and start scanning bitmap from that position
// -- scan marked objects for oops
// -- CAS-mark target, and if successful:
//    . if target oop is above global finger (volatile read)
//      nothing to do
//    . if target oop is in chunk and above local finger
//      then nothing to do
//    . else push on work-queue
// -- Deal with possible overflow issues:
//    . local work-queue overflow causes stuff to be pushed on
//      global (common) overflow queue
//    . always first empty local work queue
//    . then get a batch of oops from global work queue if any
//    . then do work stealing
// -- When all tasks claimed (both spaces)
//    and local work queue empty,
//    then in a loop do:
//    . check global overflow stack; steal a batch of oops and trace
//    . try to steal from other threads if GOS is empty
//    . if neither is available, offer termination
// -- Terminate and return result
//
void CMSConcMarkingTask::work(uint worker_id) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark hm;

  DEBUG_ONLY(_collector->verify_overflow_empty();)

  // Before we begin work, our work queue should be empty
  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
  // Scan the bitmap covering _cms_space, tracing through grey objects.
  _timer.start();
  do_scan_and_mark(worker_id, _cms_space);
  _timer.stop();
  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());

  // ... do work stealing
  _timer.reset();
  _timer.start();
  do_work_steal(worker_id);
  _timer.stop();
  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
  // Note that under the current task protocol, the
  // following assertion is true even if the spaces
  // expanded since the completion of the concurrent
  // marking. XXX This will likely change under a strict
  // ABORT semantics.
  // After perm removal the comparison was changed to
  // greater than or equal to from strictly greater than.
  // Before perm removal the highest address sweep would
  // have been at the end of perm gen but now is at the
  // end of the tenured gen.
  assert(_global_finger >= _cms_space->end(),
         "All tasks have been completed");
  DEBUG_ONLY(_collector->verify_overflow_empty();)
}

void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
  HeapWord* read = _global_finger;
  HeapWord* cur  = read;
  while (f > read) {
    cur = read;
    read = Atomic::cmpxchg(f, &_global_finger, cur);
    if (cur == read) {
      // our cas succeeded
      assert(_global_finger >= f, "protocol consistency");
      break;
    }
  }
}
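
// bump_global_finger() is an instance of the lock-free "monotonic maximum"
// idiom: re-read the shared word and CAS only while our candidate still
// exceeds it. A minimal self-contained sketch of the same idiom using
// C++11 atomics instead of HotSpot's Atomic class (illustrative only):
//
//   #include <atomic>
//   void bump_max(std::atomic<intptr_t>& g, intptr_t f) {
//     intptr_t cur = g.load();
//     // compare_exchange_weak reloads cur on failure, so the loop exits
//     // either when our CAS wins or when someone else published >= f.
//     while (f > cur && !g.compare_exchange_weak(cur, f)) { }
//   }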

// This is really inefficient, and should be redone by
// using (not yet available) block-read and -write interfaces to the
// stack and the work_queue. XXX FIX ME !!!
bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                                      OopTaskQueue* work_q) {
  // Fast lock-free check
  if (ovflw_stk->length() == 0) {
    return false;
  }
  assert(work_q->size() == 0, "Shouldn't steal");
  MutexLocker ml(ovflw_stk->par_lock(),
                 Mutex::_no_safepoint_check_flag);
  // Grab up to 1/4 the size of the work queue
  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);
  num = MIN2(num, ovflw_stk->length());
  for (int i = (int) num; i > 0; i--) {
    oop cur = ovflw_stk->pop();
    assert(cur != NULL, "Counted wrong?");
    work_q->push(cur);
  }
  return num > 0;
}
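
// Batch-size example for the transfer above (illustrative numbers): with an
// empty work queue whose max_elems() is 16384, the cap is (16384 - 0)/4 ==
// 4096 oops, further limited by ParGCDesiredObjsFromOverflowList and by the
// current length of the overflow stack.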

void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  int n_tasks = pst->n_tasks();
  // We allow that there may be no tasks to do here because
  // we are restarting after a stack overflow.
  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
  uint nth_task = 0;

  HeapWord* aligned_start = sp->bottom();
  if (sp->used_region().contains(_restart_addr)) {
    // Align down to a card boundary for the start of 0th task
    // for this space.
    aligned_start = align_down(_restart_addr, CardTable::card_size);
  }

  size_t chunk_size = sp->marking_task_size();
  while (pst->try_claim_task(/* reference */ nth_task)) {
    // Having claimed the nth task in this space,
    // compute the chunk that it corresponds to:
    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
                               aligned_start + (nth_task+1)*chunk_size);
    // Try and bump the global finger via a CAS;
    // note that we need to do the global finger bump
    // _before_ taking the intersection below, because
    // the task corresponding to that region will be
    // deemed done even if the used_region() expands
    // because of allocation -- as it almost certainly will
    // during start-up while the threads yield in the
    // closure below.
    HeapWord* finger = span.end();
    bump_global_finger(finger);   // atomically
    // There are null tasks here corresponding to chunks
    // beyond the "top" address of the space.
    span = span.intersection(sp->used_region());
    if (!span.is_empty()) {  // Non-null task
      HeapWord* prev_obj;
      assert(!span.contains(_restart_addr) || nth_task == 0,
             "Inconsistency");
      if (nth_task == 0) {
        // For the 0th task, we'll not need to compute a block_start.
        if (span.contains(_restart_addr)) {
          // In the case of a restart because of stack overflow,
          // we might additionally skip a chunk prefix.
          prev_obj = _restart_addr;
        } else {
          prev_obj = span.start();
        }
      } else {
        // We want to skip the first object because
        // the protocol is to scan any object in its entirety
        // that _starts_ in this span; a fortiori, any
        // object starting in an earlier span is scanned
        // as part of an earlier claimed task.
        // Below we use the "careful" version of block_start
        // so we do not try to navigate uninitialized objects.
        prev_obj = sp->block_start_careful(span.start());
        // Below we use a variant of block_size that uses the
        // Printezis bits to avoid waiting for allocated
        // objects to become initialized/parsable.
        while (prev_obj < span.start()) {
          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
          if (sz > 0) {
            prev_obj += sz;
          } else {
            // In this case we may end up doing a bit of redundant
            // scanning, but that appears unavoidable, short of
            // locking the free list locks; see bug 6324141.
            break;
          }
        }
      }
      if (prev_obj < span.end()) {
        MemRegion my_span = MemRegion(prev_obj, span.end());
        // Do the marking work within a non-empty span --
        // the last argument to the constructor indicates whether the
        // iteration should be incremental with periodic yields.
        ParMarkFromRootsClosure cl(this, _collector, my_span,
                                   &_collector->_markBitMap,
                                   work_queue(i),
                                   &_collector->_markStack);
        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
      } // else nothing to do for this task
    }   // else nothing to do for this task
  }
  // We'd be tempted to assert here that since there are no
  // more tasks left to claim in this space, the global_finger
  // must exceed space->top() and a fortiori space->end(). However,
  // that would not quite be correct because the bumping of
  // global_finger occurs strictly after the claiming of a task,
  // so by the time we reach here the global finger may not yet
  // have been bumped up by the thread that claimed the last
  // task.
  pst->all_tasks_completed();
}
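
// Chunk-claiming example for do_scan_and_mark() (illustrative numbers): if
// aligned_start is A and marking_task_size() is 4K words, task n covers
// [A + n*4K, A + (n+1)*4K). A task whose chunk lies entirely beyond
// used_region() intersects to an empty span, which is why claiming such
// "null tasks" above is harmless.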

class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
 private:
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSMarkStack* _overflow_stack;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    MetadataVisitingOopIterateClosure(collector->ref_processor()),
    _collector(collector),
    _task(task),
    _span(collector->_span),
    _bit_map(bit_map),
    _overflow_stack(overflow_stack),
    _work_queue(work_queue)
  { }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  void trim_queue(size_t max);
  void handle_stack_overflow(HeapWord* lost);
  void do_yield_check() {
    if (_task->should_yield()) {
      _task->yield();
    }
  }
};

DO_OOP_WORK_IMPL(ParConcMarkingClosure)

// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
void ParConcMarkingClosure::do_oop(oop obj) {
  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {     // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow ||
          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
        // stack overflow
        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
        // We cannot assert that the overflow stack is full because
        // it may have been emptied since.
        assert(simulate_overflow ||
               _work_queue->size() == _work_queue->max_elems(),
               "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    } // Else, some other thread got there first
    do_yield_check();
  }
}

void ParConcMarkingClosure::trim_queue(size_t max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(oopDesc::is_oop(new_oop), "Should be an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
      assert(_span.contains((HeapWord*)new_oop), "Not in span");
      new_oop->oop_iterate(this);  // do_oop() above
      do_yield_check();
    }
  }
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLocker ml(_overflow_stack->par_lock(),
                 Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();  // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible
}


void CMSConcMarkingTask::do_work_steal(int i) {
  OopTaskQueue* work_q = work_queue(i);
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  CMSMarkStack* ovflw = &(_collector->_markStack);
  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
  while (true) {
    cl.trim_queue(0);
    assert(work_q->size() == 0, "Should have been emptied above");
    if (get_work_from_overflow_stack(ovflw, work_q)) {
      // Can't assert below because the work obtained from the
      // overflow stack may already have been stolen from us.
      // assert(work_q->size() > 0, "Work from overflow stack");
      continue;
    } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
      assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
      obj_to_scan->oop_iterate(&cl);
    } else if (terminator()->offer_termination(&_term_term)) {
      assert(work_q->size() == 0, "Impossible!");
      break;
    } else if (yielding() || should_yield()) {
      yield();
    }
  }
}

// This is run by the CMS (coordinator) thread.
void CMSConcMarkingTask::coordinator_yield() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bit_map_lock);
  _bit_map_lock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  _collector->incrementYields();

  // It is possible for whichever thread initiated the yield request
  // not to get a chance to wake up and take the bitmap lock between
  // this thread releasing it and reacquiring it. So, while the
  // should_yield() flag is on, let's sleep for a bit to give the
  // other thread a chance to wake up. The limit imposed on the number
  // of iterations is defensive, to avoid any unforeseen circumstances
  // putting us into an infinite loop. Since it's always been this
  // (coordinator_yield()) method that was observed to cause the
  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  // which is by default non-zero. For the other seven methods that
  // also perform the yield operation, we are using a different
  // parameter (CMSYieldSleepCount) which is by default zero. This way we
  // can enable the sleeping for those methods too, if necessary.
  // See 6442774.
  //
  // We really need to reconsider the synchronization between the GC
  // thread and the yield-requesting threads in the future and we
  // should really use wait/notify, which is the recommended
  // way of doing this type of interaction. Additionally, we should
  // consolidate the eight methods that do the yield operation, which
  // are almost identical, into one for better maintainability and
  // readability. See 6445193.
  //
  // Tony 2006.06.29
  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map_lock->lock_without_safepoint_check();
  _collector->startTimer();
}

bool CMSCollector::do_marking_mt() {
  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
  uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
                                                            conc_workers()->active_workers(),
                                                            Threads::number_of_non_daemon_threads());
  num_workers = conc_workers()->update_active_workers(num_workers);
  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());

  CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();

  CMSConcMarkingTask tsk(this,
                         cms_space,
                         conc_workers(),
                         task_queues());

  // Since the actual number of workers we get may be different
  // from the number we requested above, do we need to do anything different
  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
  // class? XXX
  cms_space->initialize_sequential_subtasks_for_marking(num_workers);

  // Refs discovery is already non-atomic.
  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
  conc_workers()->start_task(&tsk);
  while (tsk.yielded()) {
    tsk.coordinator_yield();
    conc_workers()->continue_task(&tsk);
  }
  // If the task was aborted, _restart_addr will be non-NULL
  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
  while (_restart_addr != NULL) {
    // XXX For now we do not make use of ABORTED state and have not
    // yet implemented the right abort semantics (even in the original
    // single-threaded CMS case). That needs some more investigation
    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh marking iteration from the
    // indicated restart address.
    if (_foregroundGCIsActive) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
      // works from scratch. This avoids the headache of
      // a "rescan" which would otherwise be needed because
      // of the dirty mod union table & card table.
      _restart_addr = NULL;
      return false;
    }
    // Adjust the task to restart from _restart_addr
    tsk.reset(_restart_addr);
    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
                                                          _restart_addr);
    _restart_addr = NULL;
    // Get the workers going again
    conc_workers()->start_task(&tsk);
    while (tsk.yielded()) {
      tsk.coordinator_yield();
      conc_workers()->continue_task(&tsk);
    }
  }
  assert(tsk.completed(), "Inconsistency");
  assert(tsk.result() == true, "Inconsistency");
  return true;
}

bool CMSCollector::do_marking_st() {
  ResourceMark rm;
  HandleMark   hm;

  // Temporarily make refs discovery single threaded (non-MT)
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
    &_markStack, CMSYield);
  // the last argument to iterate indicates whether the iteration
  // should be incremental with periodic yields.
  _markBitMap.iterate(&markFromRootsClosure);
  // If _restart_addr is non-NULL, a marking stack overflow
  // occurred; we need to do a fresh iteration from the
  // indicated restart address.
  while (_restart_addr != NULL) {
    if (_foregroundGCIsActive) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
      // works from scratch. This avoids the headache of
      // a "rescan" which would otherwise be needed because
      // of the dirty mod union table & card table.
      _restart_addr = NULL;
      return false;  // indicating failure to complete marking
    }
    // Deal with stack overflow:
    // we restart marking from _restart_addr
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
  }
  return true;
}

void CMSCollector::preclean() {
  check_correct_thread_executing();
  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
  verify_work_stacks_empty();
  verify_overflow_empty();
  _abort_preclean = false;
  if (CMSPrecleaningEnabled) {
    if (!CMSEdenChunksRecordAlways) {
      _eden_chunk_index = 0;
    }
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    // Don't start sampling unless we will get sufficiently
    // many samples.
    if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
                * CMSScheduleRemarkEdenPenetration)) {
      _start_sampling = true;
    } else {
      _start_sampling = false;
    }
    GCTraceCPUTime tcpu;
    CMSPhaseAccounting pa(this, "Concurrent Preclean");
    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  }
  CMSTokenSync x(true); // is cms thread
  if (CMSPrecleaningEnabled) {
    sample_eden();
    _collectorState = AbortablePreclean;
  } else {
    _collectorState = FinalMarking;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
}

// Try to schedule the remark such that young gen
// occupancy is CMSScheduleRemarkEdenPenetration %.
void CMSCollector::abortable_preclean() {
  check_correct_thread_executing();
  assert(CMSPrecleaningEnabled, "Inconsistent control state");
  assert(_collectorState == AbortablePreclean, "Inconsistent control state");

  // If Eden's current occupancy is below this threshold,
  // immediately schedule the remark; else preclean
  // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  // we will never do an actual abortable preclean cycle.
  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
    GCTraceCPUTime tcpu;
    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
    // We need more smarts in the abortable preclean
    // loop below to deal with cases where allocation
    // in young gen is very very slow, and our precleaning
    // is running a losing race against a horde of
    // mutators intent on flooding us with CMS updates
    // (dirty cards).
    // One, admittedly dumb, strategy is to give up
    // after a certain number of abortable precleaning loops
    // or after a certain maximum time. We want to make
    // this smarter in the next iteration.
    // XXX FIX ME!!! YSR
    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
    while (!(should_abort_preclean() ||
             ConcurrentMarkSweepThread::cmst()->should_terminate())) {
      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
      cumworkdone += workdone;
      loops++;
      // Voluntarily terminate abortable preclean phase if we have
      // been at it for too long.
      if ((CMSMaxAbortablePrecleanLoops != 0) &&
          loops >= CMSMaxAbortablePrecleanLoops) {
        log_debug(gc)(" CMS: abort preclean due to loops ");
        break;
      }
      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
        log_debug(gc)(" CMS: abort preclean due to time ");
        break;
      }
      // If we are doing little work each iteration, we should
      // take a short break.
      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
        // Sleep for some time, waiting for work to accumulate
        stopTimer();
        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
        startTimer();
        waited++;
      }
    }
    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards]",
                  loops, waited, cumworkdone);
  }
  CMSTokenSync x(true); // is cms thread
  if (_collectorState != Idling) {
    assert(_collectorState == AbortablePreclean,
           "Spontaneous state transition?");
    _collectorState = FinalMarking;
  } // Else, a foreground collection completed this CMS cycle.
  return;
}

// Respond to an Eden sampling opportunity
void CMSCollector::sample_eden() {
  // Make sure a young gc cannot sneak in between our
  // reading and recording of a sample.
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "Only the cms thread may collect Eden samples");
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Should collect samples while holding CMS token");
  if (!_start_sampling) {
    return;
  }
  // When CMSEdenChunksRecordAlways is true, the eden chunk array
  // is populated by the young generation.
  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
    if (_eden_chunk_index < _eden_chunk_capacity) {
      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
             "Unexpected state of Eden");
      // We'd like to check that what we just sampled is an oop-start address;
      // however, we cannot do that here since the object may not yet have been
      // initialized. So we'll instead do the check when we _use_ this sample
      // later.
      if (_eden_chunk_index == 0 ||
          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
                         _eden_chunk_array[_eden_chunk_index-1])
           >= CMSSamplingGrain)) {
        _eden_chunk_index++;  // commit sample
      }
    }
  }
  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    assert(used <= capacity, "Unexpected state of Eden");
    if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
      _abort_preclean = true;
    }
  }
}
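
// Two numeric examples for the policy above (illustrative values, the flag
// settings may differ in a given build): with CMSSamplingGrain == 1K words,
// a new top sample is committed only if it lies at least 1K words past the
// previously recorded one; and with CMSScheduleRemarkEdenPenetration == 50
// and a 64M eden, precleaning is aborted once eden usage exceeds
// 64M/100 * 50 == 32M.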

size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  assert(_collectorState == Precleaning ||
         _collectorState == AbortablePreclean, "incorrect state");
  ResourceMark rm;
  HandleMark   hm;

  // Precleaning is currently not MT but the reference processor
  // may be set for MT.  Disable it temporarily here.
  ReferenceProcessor* rp = ref_processor();
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);

  // Do one pass of scrubbing the discovered reference lists
  // to remove any reference objects with strongly-reachable
  // referents.
  if (clean_refs) {
    CMSPrecleanRefsYieldClosure yield_cl(this);
    assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                   &_markStack, true /* preclean */);
    CMSDrainMarkingStackClosure complete_trace(this,
                                   _span, &_markBitMap, &_markStack,
                                   &keep_alive, true /* preclean */);

    // We don't want this step to interfere with a young
    // collection because we don't want to take CPU
    // or memory bandwidth away from the young GC threads
    // (which may be as many as there are CPUs).
    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // manipulate the discovered reference lists nor affect
    // the computed reachability of the referents, the
    // only properties manipulated by the precleaning
    // of these reference lists.
    stopTimer();
    CMSTokenSyncWithLocks x(true /* is cms thread */,
                            bitMapLock());
    startTimer();
    sample_eden();

    // The following will yield to allow foreground
    // collection to proceed promptly. XXX YSR:
    // The code in this method may need further
    // tweaking for better performance and some restructuring
    // for cleaner interfaces.
    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
    rp->preclean_discovered_references(
          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
          gc_timer);
  }

  if (clean_survivor) {  // preclean the active survivor space(s)
    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, true /* precleaning phase */);
    stopTimer();
    CMSTokenSyncWithLocks ts(true /* is cms thread */,
                             bitMapLock());
    startTimer();
    unsigned int before_count =
      CMSHeap::heap()->total_collections();
    SurvivorSpacePrecleanClosure
      sss_cl(this, _span, &_markBitMap, &_markStack,
             &pam_cl, before_count, CMSYield);
    _young_gen->from()->object_iterate_careful(&sss_cl);
    _young_gen->to()->object_iterate_careful(&sss_cl);
  }
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
             &_markStack, this, CMSYield,
             true /* precleaning phase */);
  // CAUTION: The following closure has persistent state that may need to
  // be reset upon a decrease in the sequence of addresses it
  // processes.
  ScanMarkedObjectsAgainCarefullyClosure
    smoac_cl(this, _span,
      &_markBitMap, &_markStack, &mrias_cl, CMSYield);

  // Preclean dirty cards in ModUnionTable and CardTable using
  // appropriate convergence criterion;
  // repeat CMSPrecleanIter times unless we find that
  // we are losing.
  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
         "Bad convergence multiplier");
  assert(CMSPrecleanThreshold >= 100,
         "Unreasonably low CMSPrecleanThreshold");

  size_t numIter, cumNumCards, lastNumCards, curNumCards;
  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
       numIter < CMSPrecleanIter;
       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
    // Either there are very few dirty cards, so re-mark
    // pause will be small anyway, or our pre-cleaning isn't
    // that much faster than the rate at which cards are being
    // dirtied, so we might as well stop and re-mark since
    // precleaning won't improve our re-mark time by much.
    if (curNumCards <= CMSPrecleanThreshold ||
        (numIter > 0 &&
         (curNumCards * CMSPrecleanDenominator >
         lastNumCards * CMSPrecleanNumerator))) {
      numIter++;
      cumNumCards += curNumCards;
      break;
    }
  }
|
3909 |
|
3910 preclean_cld(&mrias_cl, _cmsGen->freelistLock()); |
|
3911 |
|
3912 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); |
|
3913 cumNumCards += curNumCards; |
|
3914 log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", |
|
3915 curNumCards, cumNumCards, numIter); |
|
3916 return cumNumCards; // as a measure of useful work done |
|
3917 } |
|
3918 |
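// Standalone restatement (hypothetical helper) of the termination test in
// the preclean loop above: stop when few cards remain dirty, or when the
// newest pass found cards at more than
// CMSPrecleanNumerator/CMSPrecleanDenominator of the previous pass's
// count, i.e. when precleaning is "losing" to the mutators' dirtying rate.
static bool preclean_should_stop_sketch(size_t cur_num_cards,
                                        size_t last_num_cards,
                                        size_t num_iter) {
  return cur_num_cards <= CMSPrecleanThreshold ||
         (num_iter > 0 &&
          cur_num_cards * CMSPrecleanDenominator >
            last_num_cards * CMSPrecleanNumerator);
}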
|
3919 // PRECLEANING NOTES: |
|
3920 // Precleaning involves: |
|
3921 // . reading the bits of the modUnionTable and clearing the set bits. |
|
3922 // . For the cards corresponding to the set bits, we scan the |
|
3923 // objects on those cards. This means we need the free_list_lock |
|
3924 // so that we can safely iterate over the CMS space when scanning |
|
3925 // for oops. |
|
3926 // . When we scan the objects, we'll be both reading and setting |
|
3927 // marks in the marking bit map, so we'll need the marking bit map. |
|
3928 // . For protecting _collectorState transitions, we take the CGC_lock. |
|
3929 // Note that any races in the reading of card table entries by the |
|
3930 // CMS thread on the one hand and the clearing of those entries by the |
|
3931 // VM thread or the setting of those entries by the mutator threads on the |
|
3932 // other are quite benign. However, for efficiency it makes sense to keep |
|
3933 // the VM thread from racing with the CMS thread while the latter is |
|
3934 // transferring dirty card info to the modUnionTable. We therefore also use the |
|
3935 // CGC_lock to protect the reading of the card table and the mod union |
|
3936 // table by the CMS thread. |
|
3937 // . We run concurrently with mutator updates, so scanning |
|
3938 // needs to be done carefully -- we should not try to scan |
|
3939 // potentially uninitialized objects. |
|
3940 // |
|
3941 // Locking strategy: While holding the CGC_lock, we scan over and |
|
3942 // reset a maximal dirty range of the mod union / card tables, then lock |
|
3943 // the free_list_lock and bitmap lock to do a full marking, then |
|
3944 // release these locks; and repeat the cycle. This allows for a |
|
3945 // certain amount of fairness in the sharing of these locks between |
|
3946 // the CMS collector on the one hand, and the VM thread and the |
|
3947 // mutators on the other. |
|
3948 |
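// Standalone sketch of the locking cycle described above, with std::mutex
// stand-ins for CGC_lock, the free list lock and the bitmap lock (names
// and callbacks here are hypothetical; the collector itself expresses
// this pattern with CMSTokenSync and CMSTokenSyncWithLocks).
#include <mutex>  // hoisted here only for the sketch

struct PrecleanLocksSketch {
  std::mutex cgc_lock;        // guards table scanning/resetting
  std::mutex free_list_lock;  // guards iteration over the CMS space
  std::mutex bitmap_lock;     // guards mark bitmap updates
};

static void preclean_cycle_sketch(PrecleanLocksSketch& locks,
                                  bool (*reset_next_dirty_range)(),
                                  void (*mark_through_range)()) {
  while (true) {
    bool found_work;
    {
      // Scan over and reset a maximal dirty range under CGC_lock alone.
      std::lock_guard<std::mutex> g(locks.cgc_lock);
      found_work = reset_next_dirty_range();
    }
    if (!found_work) break;
    {
      // Take the heavyweight locks only for the marking of that range,
      // always in the same order so the sketch cannot deadlock.
      std::lock_guard<std::mutex> g1(locks.free_list_lock);
      std::lock_guard<std::mutex> g2(locks.bitmap_lock);
      mark_through_range();
    }
    // All locks are released here, giving the VM thread and the mutators
    // a fair window to acquire them between iterations.
  }
}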
|
3949 // NOTE: preclean_mod_union_table() and preclean_card_table() |
|
3950 // further below are largely identical; if you need to modify |
|
3951 // one of these methods, please check the other method too. |
|
3952 |
|
3953 size_t CMSCollector::preclean_mod_union_table( |
|
3954 ConcurrentMarkSweepGeneration* old_gen, |
|
3955 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
|
3956 verify_work_stacks_empty(); |
|
3957 verify_overflow_empty(); |
|
3958 |
|
3959 // strategy: starting with the first card, accumulate contiguous |
|
3960 // ranges of dirty cards; clear these cards, then scan the region |
|
3961 // covered by these cards. |
|
3962 |
|
3963 // Since all of the MUT is committed ahead, we can just use |
|
3964 // that, in case the generations expand while we are precleaning. |
|
3965 // It might also be fine to just use the committed part of the |
|
3966 // generation, but we might potentially miss cards when the |
|
3967 // generation is rapidly expanding while we are in the midst |
|
3968 // of precleaning. |
|
3969 HeapWord* startAddr = old_gen->reserved().start(); |
|
3970 HeapWord* endAddr = old_gen->reserved().end(); |
|
3971 |
|
3972 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding |
|
3973 |
|
3974 size_t numDirtyCards, cumNumDirtyCards; |
|
3975 HeapWord *nextAddr, *lastAddr; |
|
3976 for (cumNumDirtyCards = numDirtyCards = 0, |
|
3977 nextAddr = lastAddr = startAddr; |
|
3978 nextAddr < endAddr; |
|
3979 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { |
|
3980 |
|
3981 ResourceMark rm; |
|
3982 HandleMark hm; |
|
3983 |
|
3984 MemRegion dirtyRegion; |
|
3985 { |
|
3986 stopTimer(); |
|
3987 // Potential yield point |
|
3988 CMSTokenSync ts(true); |
|
3989 startTimer(); |
|
3990 sample_eden(); |
|
3991 // Get dirty region starting at nextAddr (inclusive), |
|
3992 // simultaneously clearing it. |
|
3993 dirtyRegion = |
|
3994 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); |
|
3995 assert(dirtyRegion.start() >= nextAddr, |
|
3996 "returned region inconsistent?"); |
|
3997 } |
|
3998 // Remember where the next search should begin. |
|
3999 // The returned region (if non-empty) is a right open interval, |
|
4000 // so lastAddr is obtained from the right end of that |
|
4001 // interval. |
|
4002 lastAddr = dirtyRegion.end(); |
|
4003 // Should do something more transparent and less hacky XXX |
|
4004 numDirtyCards = |
|
4005 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); |
|
4006 |
|
4007 // We'll scan the cards in the dirty region (with periodic |
|
4008 // yields for foreground GC as needed). |
|
4009 if (!dirtyRegion.is_empty()) { |
|
4010 assert(numDirtyCards > 0, "consistency check"); |
|
4011 HeapWord* stop_point = NULL; |
|
4012 stopTimer(); |
|
4013 // Potential yield point |
|
4014 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), |
|
4015 bitMapLock()); |
|
4016 startTimer(); |
|
4017 { |
|
4018 verify_work_stacks_empty(); |
|
4019 verify_overflow_empty(); |
|
4020 sample_eden(); |
|
4021 stop_point = |
|
4022 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
|
4023 } |
|
4024 if (stop_point != NULL) { |
|
4025 // The careful iteration stopped early either because it found an |
|
4026 // uninitialized object, or because we were in the midst of an |
|
4027 // "abortable preclean", which should now be aborted. Redirty |
|
4028 // the bits corresponding to the partially-scanned or unscanned |
|
4029 // cards. We'll either restart at the next block boundary or |
|
4030 // abort the preclean. |
|
4031 assert((_collectorState == AbortablePreclean && should_abort_preclean()), |
|
4032 "Should only be AbortablePreclean."); |
|
4033 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); |
|
4034 if (should_abort_preclean()) { |
|
4035 break; // out of preclean loop |
|
4036 } else { |
|
4037 // Compute the next address at which preclean should pick up; |
|
4038 // might need bitMapLock in order to read P-bits. |
|
4039 lastAddr = next_card_start_after_block(stop_point); |
|
4040 } |
|
4041 } |
|
4042 } else { |
|
4043 assert(lastAddr == endAddr, "consistency check"); |
|
4044 assert(numDirtyCards == 0, "consistency check"); |
|
4045 break; |
|
4046 } |
|
4047 } |
|
4048 verify_work_stacks_empty(); |
|
4049 verify_overflow_empty(); |
|
4050 return cumNumDirtyCards; |
|
4051 } |
|
4052 |
|
4053 // NOTE: preclean_mod_union_table() above and preclean_card_table() |
|
4054 // below are largely identical; if you need to modify |
|
4055 // one of these methods, please check the other method too. |
|
4056 |
|
4057 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen, |
|
4058 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
|
4059 // strategy: it's similar to preclean_mod_union_table above, in that |
|
4060 // we accumulate contiguous ranges of dirty cards, mark these cards |
|
4061 // precleaned, then scan the region covered by these cards. |
|
4062 HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high()); |
|
4063 HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low()); |
|
4064 |
|
4065 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding |
|
4066 |
|
4067 size_t numDirtyCards, cumNumDirtyCards; |
|
4068 HeapWord *lastAddr, *nextAddr; |
|
4069 |
|
4070 for (cumNumDirtyCards = numDirtyCards = 0, |
|
4071 nextAddr = lastAddr = startAddr; |
|
4072 nextAddr < endAddr; |
|
4073 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { |
|
4074 |
|
4075 ResourceMark rm; |
|
4076 HandleMark hm; |
|
4077 |
|
4078 MemRegion dirtyRegion; |
|
4079 { |
|
4080 // See comments in "Precleaning notes" above on why we |
|
4081 // do this locking. XXX Could the locking overheads be |
|
4082 // too high when dirty cards are sparse? [I don't think so.] |
|
4083 stopTimer(); |
|
4084 CMSTokenSync x(true); // is cms thread |
|
4085 startTimer(); |
|
4086 sample_eden(); |
|
4087 // Get and clear dirty region from card table |
|
4088 dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr), |
|
4089 true, |
|
4090 CardTable::precleaned_card_val()); |
|
4091 |
|
4092 assert(dirtyRegion.start() >= nextAddr, |
|
4093 "returned region inconsistent?"); |
|
4094 } |
|
4095 lastAddr = dirtyRegion.end(); |
|
4096 numDirtyCards = |
|
4097 dirtyRegion.word_size()/CardTable::card_size_in_words; |
|
4098 |
|
4099 if (!dirtyRegion.is_empty()) { |
|
4100 stopTimer(); |
|
4101 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock()); |
|
4102 startTimer(); |
|
4103 sample_eden(); |
|
4104 verify_work_stacks_empty(); |
|
4105 verify_overflow_empty(); |
|
4106 HeapWord* stop_point = |
|
4107 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
|
4108 if (stop_point != NULL) { |
|
4109 assert((_collectorState == AbortablePreclean && should_abort_preclean()), |
|
4110 "Should only be AbortablePreclean."); |
|
4111 _ct->invalidate(MemRegion(stop_point, dirtyRegion.end())); |
|
4112 if (should_abort_preclean()) { |
|
4113 break; // out of preclean loop |
|
4114 } else { |
|
4115 // Compute the next address at which preclean should pick up. |
|
4116 lastAddr = next_card_start_after_block(stop_point); |
|
4117 } |
|
4118 } |
|
4119 } else { |
|
4120 break; |
|
4121 } |
|
4122 } |
|
4123 verify_work_stacks_empty(); |
|
4124 verify_overflow_empty(); |
|
4125 return cumNumDirtyCards; |
|
4126 } |
|
4127 |
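// Sketch of the card arithmetic shared by the two preclean methods above
// (hypothetical helper): a card-aligned dirty region's size in HeapWords
// converts to a card count by dividing by the card size in words.
static size_t dirty_cards_in_region_sketch(size_t region_word_size) {
  // With 512-byte cards and 8-byte HeapWords this divides by 64; the
  // division is exact because the returned dirty regions are card-aligned.
  return region_word_size / CardTable::card_size_in_words;
}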
|
4128 class PrecleanCLDClosure : public CLDClosure { |
|
4129 MetadataVisitingOopsInGenClosure* _cm_closure; |
|
4130 public: |
|
4131 PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {} |
|
4132 void do_cld(ClassLoaderData* cld) { |
|
4133 if (cld->has_accumulated_modified_oops()) { |
|
4134 cld->clear_accumulated_modified_oops(); |
|
4135 |
|
4136 _cm_closure->do_cld(cld); |
|
4137 } |
|
4138 } |
|
4139 }; |
|
4140 |
|
4141 // The freelist lock is needed to prevent asserts; is it really needed? |
|
4142 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) { |
|
4143 // Needed to walk CLDG |
|
4144 MutexLocker ml(ClassLoaderDataGraph_lock); |
|
4145 |
|
4146 cl->set_freelistLock(freelistLock); |
|
4147 |
|
4148 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock()); |
|
4149 |
|
4150 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean? |
|
4151 // SSS: We should probably check if precleaning should be aborted, at suitable intervals? |
|
4152 PrecleanCLDClosure preclean_closure(cl); |
|
4153 ClassLoaderDataGraph::cld_do(&preclean_closure); |
|
4154 |
|
4155 verify_work_stacks_empty(); |
|
4156 verify_overflow_empty(); |
|
4157 } |
|
4158 |
|
4159 void CMSCollector::checkpointRootsFinal() { |
|
4160 assert(_collectorState == FinalMarking, "incorrect state transition?"); |
|
4161 check_correct_thread_executing(); |
|
4162 // world is stopped at this checkpoint |
|
4163 assert(SafepointSynchronize::is_at_safepoint(), |
|
4164 "world should be stopped"); |
|
4165 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause()); |
|
4166 |
|
4167 verify_work_stacks_empty(); |
|
4168 verify_overflow_empty(); |
|
4169 |
|
4170 log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)", |
|
4171 _young_gen->used() / K, _young_gen->capacity() / K); |
|
4172 { |
|
4173 if (CMSScavengeBeforeRemark) { |
|
4174 CMSHeap* heap = CMSHeap::heap(); |
|
4175 // Temporarily set the flag to false; GCH->do_collection expects it |
|
4176 // to be false and will set it back to true. |
|
4177 FlagSetting fl(heap->_is_gc_active, false); |
|
4178 |
|
4179 heap->do_collection(true, // full (i.e. force, see below) |
|
4180 false, // !clear_all_soft_refs |
|
4181 0, // size |
|
4182 false, // is_tlab |
|
4183 GenCollectedHeap::YoungGen // type |
|
4184 ); |
|
4185 } |
|
4186 FreelistLocker x(this); |
|
4187 MutexLocker y(bitMapLock(), |
|
4188 Mutex::_no_safepoint_check_flag); |
|
4189 checkpointRootsFinalWork(); |
|
4190 _cmsGen->cmsSpace()->recalculate_used_stable(); |
|
4191 } |
|
4192 verify_work_stacks_empty(); |
|
4193 verify_overflow_empty(); |
|
4194 } |
|
4195 |
|
4196 void CMSCollector::checkpointRootsFinalWork() { |
|
4197 GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm); |
|
4198 |
|
4199 assert(haveFreelistLocks(), "must have free list locks"); |
|
4200 assert_lock_strong(bitMapLock()); |
|
4201 |
|
4202 ResourceMark rm; |
|
4203 HandleMark hm; |
|
4204 |
|
4205 CMSHeap* heap = CMSHeap::heap(); |
|
4206 |
|
4207 assert(haveFreelistLocks(), "must have free list locks"); |
|
4208 assert_lock_strong(bitMapLock()); |
|
4209 |
|
4210 // We might assume that we need not fill TLAB's when |
|
4211 // CMSScavengeBeforeRemark is set, because we may have just done |
|
4212 // a scavenge which would have filled all TLAB's -- and besides |
|
4213 // Eden would be empty. This however may not always be the case -- |
|
4214 // for instance although we asked for a scavenge, it may not have |
|
4215 // happened because of a JNI critical section. We probably need |
|
4216 // a policy for deciding whether we can in that case wait until |
|
4217 // the critical section releases and then do the remark following |
|
4218 // the scavenge, and skip it here. In the absence of that policy, |
|
4219 // or of an indication of whether the scavenge did indeed occur, |
|
4220 // we cannot rely on TLAB's having been filled and must do |
|
4221 // so here just in case a scavenge did not happen. |
|
4222 heap->ensure_parsability(false); // fill TLAB's, but no need to retire them |
|
4223 // Update the saved marks which may affect the root scans. |
|
4224 heap->save_marks(); |
|
4225 |
|
4226 print_eden_and_survivor_chunk_arrays(); |
|
4227 |
|
4228 { |
|
4229 #if COMPILER2_OR_JVMCI |
|
4230 DerivedPointerTableDeactivate dpt_deact; |
|
4231 #endif |
|
4232 |
|
4233 // Note on the role of the mod union table: |
|
4234 // Since the marker in "markFromRoots" marks concurrently with |
|
4235 // mutators, it is possible for some reachable objects not to have been |
|
4236 // scanned. For instance, the only reference to an object A was |
|
4237 // placed in object B after the marker scanned B. Unless B is rescanned, |
|
4238 // A would be collected. Such updates to references in marked objects |
|
4239 // are detected via the mod union table which is the set of all cards |
|
4240 // dirtied since the first checkpoint in this GC cycle and prior to |
|
4241 // the most recent young generation GC, minus those cleaned up by the |
|
4242 // concurrent precleaning. |
|
4243 if (CMSParallelRemarkEnabled) { |
|
4244 GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm); |
|
4245 do_remark_parallel(); |
|
4246 } else { |
|
4247 GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm); |
|
4248 do_remark_non_parallel(); |
|
4249 } |
|
4250 } |
|
4251 verify_work_stacks_empty(); |
|
4252 verify_overflow_empty(); |
|
4253 |
|
4254 { |
|
4255 GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm); |
|
4256 refProcessingWork(); |
|
4257 } |
|
4258 verify_work_stacks_empty(); |
|
4259 verify_overflow_empty(); |
|
4260 |
|
4261 if (should_unload_classes()) { |
|
4262 heap->prune_scavengable_nmethods(); |
|
4263 } |
|
4264 |
|
4265 // If we encountered any (marking stack / work queue) overflow |
|
4266 // events during the current CMS cycle, take appropriate |
|
4267 // remedial measures, where possible, so as to try and avoid |
|
4268 // recurrence of that condition. |
|
4269 assert(_markStack.isEmpty(), "No grey objects"); |
|
4270 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + |
|
4271 _ser_kac_ovflw + _ser_kac_preclean_ovflw; |
|
4272 if (ser_ovflw > 0) { |
|
4273 log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")", |
|
4274 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw); |
|
4275 _markStack.expand(); |
|
4276 _ser_pmc_remark_ovflw = 0; |
|
4277 _ser_pmc_preclean_ovflw = 0; |
|
4278 _ser_kac_preclean_ovflw = 0; |
|
4279 _ser_kac_ovflw = 0; |
|
4280 } |
|
4281 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { |
|
4282 log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", |
|
4283 _par_pmc_remark_ovflw, _par_kac_ovflw); |
|
4284 _par_pmc_remark_ovflw = 0; |
|
4285 _par_kac_ovflw = 0; |
|
4286 } |
|
4287 if (_markStack._hit_limit > 0) { |
|
4288 log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", |
|
4289 _markStack._hit_limit); |
|
4290 } |
|
4291 if (_markStack._failed_double > 0) { |
|
4292 log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT, |
|
4293 _markStack._failed_double, _markStack.capacity()); |
|
4294 } |
|
4295 _markStack._hit_limit = 0; |
|
4296 _markStack._failed_double = 0; |
|
4297 |
|
4298 if ((VerifyAfterGC || VerifyDuringGC) && |
|
4299 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) { |
|
4300 verify_after_remark(); |
|
4301 } |
|
4302 |
|
4303 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure); |
|
4304 |
|
4305 // Change under the freelistLocks. |
|
4306 _collectorState = Sweeping; |
|
4307 // Call isAllClear() under bitMapLock |
|
4308 assert(_modUnionTable.isAllClear(), |
|
4309 "Should be clear by end of the final marking"); |
|
4310 assert(_ct->cld_rem_set()->mod_union_is_clear(), |
|
4311 "Should be clear by end of the final marking"); |
|
4312 } |
|
4313 |
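// Toy model (std::bitset over a hypothetical 64-card window; not
// collector code) of the mod union table bookkeeping described in
// checkpointRootsFinalWork() above: the cards that remark must rescan are
// those dirtied since the initial checkpoint, minus those already cleaned
// up by concurrent precleaning.
#include <bitset>  // hoisted here only for the sketch

static std::bitset<64> cards_to_rescan_sketch(
    const std::bitset<64>& dirtied_since_checkpoint,
    const std::bitset<64>& precleaned) {
  return dirtied_since_checkpoint & ~precleaned;
}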
|
4314 void CMSParInitialMarkTask::work(uint worker_id) { |
|
4315 elapsedTimer _timer; |
|
4316 ResourceMark rm; |
|
4317 HandleMark hm; |
|
4318 |
|
4319 // ---------- scan from roots -------------- |
|
4320 _timer.start(); |
|
4321 CMSHeap* heap = CMSHeap::heap(); |
|
4322 ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap)); |
|
4323 |
|
4324 // ---------- young gen roots -------------- |
|
4325 { |
|
4326 work_on_young_gen_roots(&par_mri_cl); |
|
4327 _timer.stop(); |
|
4328 log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4329 } |
|
4330 |
|
4331 // ---------- remaining roots -------------- |
|
4332 _timer.reset(); |
|
4333 _timer.start(); |
|
4334 |
|
4335 CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong); |
|
4336 |
|
4337 heap->cms_process_roots(_strong_roots_scope, |
|
4338 false, // yg was scanned above |
|
4339 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), |
|
4340 _collector->should_unload_classes(), |
|
4341 &par_mri_cl, |
|
4342 &cld_closure); |
|
4343 |
|
4344 assert(_collector->should_unload_classes() |
|
4345 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), |
|
4346 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
|
4347 _timer.stop(); |
|
4348 log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4349 } |
|
4350 |
|
4351 // Parallel remark task |
|
4352 class CMSParRemarkTask: public CMSParMarkTask { |
|
4353 CompactibleFreeListSpace* _cms_space; |
|
4354 |
|
4355 // The per-thread work queues, available here for stealing. |
|
4356 OopTaskQueueSet* _task_queues; |
|
4357 TaskTerminator _term; |
|
4358 StrongRootsScope* _strong_roots_scope; |
|
4359 |
|
4360 public: |
|
4361 // A value of 0 passed to n_workers will cause the number of |
|
4362 // workers to be taken from the active workers in the work gang. |
|
4363 CMSParRemarkTask(CMSCollector* collector, |
|
4364 CompactibleFreeListSpace* cms_space, |
|
4365 uint n_workers, WorkGang* workers, |
|
4366 OopTaskQueueSet* task_queues, |
|
4367 StrongRootsScope* strong_roots_scope): |
|
4368 CMSParMarkTask("Rescan roots and grey objects in parallel", |
|
4369 collector, n_workers), |
|
4370 _cms_space(cms_space), |
|
4371 _task_queues(task_queues), |
|
4372 _term(n_workers, task_queues), |
|
4373 _strong_roots_scope(strong_roots_scope) { } |
|
4374 |
|
4375 OopTaskQueueSet* task_queues() { return _task_queues; } |
|
4376 |
|
4377 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } |
|
4378 |
|
4379 ParallelTaskTerminator* terminator() { return _term.terminator(); } |
|
4380 uint n_workers() { return _n_workers; } |
|
4381 |
|
4382 void work(uint worker_id); |
|
4383 |
|
4384 private: |
|
4385 // ... of dirty cards in old space |
|
4386 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, |
|
4387 ParMarkRefsIntoAndScanClosure* cl); |
|
4388 |
|
4389 // ... work stealing for the above |
|
4390 void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl); |
|
4391 }; |
|
4392 |
|
4393 class RemarkCLDClosure : public CLDClosure { |
|
4394 CLDToOopClosure _cm_closure; |
|
4395 public: |
|
4396 RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {} |
|
4397 void do_cld(ClassLoaderData* cld) { |
|
4398 // Check if we have modified any oops in the CLD during the concurrent marking. |
|
4399 if (cld->has_accumulated_modified_oops()) { |
|
4400 cld->clear_accumulated_modified_oops(); |
|
4401 |
|
4402 // We could have transferred the current modified marks to the accumulated marks, |
|
4403 // like we do with the Card Table to Mod Union Table. But it's not really necessary. |
|
4404 } else if (cld->has_modified_oops()) { |
|
4405 // Don't clear anything, this info is needed by the next young collection. |
|
4406 } else { |
|
4407 // No modified oops in the ClassLoaderData. |
|
4408 return; |
|
4409 } |
|
4410 |
|
4411 // The klass has modified fields, need to scan the klass. |
|
4412 _cm_closure.do_cld(cld); |
|
4413 } |
|
4414 }; |
|
4415 |
|
4416 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) { |
|
4417 ParNewGeneration* young_gen = _collector->_young_gen; |
|
4418 ContiguousSpace* eden_space = young_gen->eden(); |
|
4419 ContiguousSpace* from_space = young_gen->from(); |
|
4420 ContiguousSpace* to_space = young_gen->to(); |
|
4421 |
|
4422 HeapWord** eca = _collector->_eden_chunk_array; |
|
4423 size_t ect = _collector->_eden_chunk_index; |
|
4424 HeapWord** sca = _collector->_survivor_chunk_array; |
|
4425 size_t sct = _collector->_survivor_chunk_index; |
|
4426 |
|
4427 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds"); |
|
4428 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds"); |
|
4429 |
|
4430 do_young_space_rescan(cl, to_space, NULL, 0); |
|
4431 do_young_space_rescan(cl, from_space, sca, sct); |
|
4432 do_young_space_rescan(cl, eden_space, eca, ect); |
|
4433 } |
|
4434 |
|
4435 // work_queue(i) is passed to the closure |
|
4436 // ParMarkRefsIntoAndScanClosure. The "i" parameter |
|
4437 // is also passed to do_dirty_card_rescan_tasks() and to |
|
4438 // do_work_steal() to select the i-th task_queue. |
|
4439 |
|
4440 void CMSParRemarkTask::work(uint worker_id) { |
|
4441 elapsedTimer _timer; |
|
4442 ResourceMark rm; |
|
4443 HandleMark hm; |
|
4444 |
|
4445 // ---------- rescan from roots -------------- |
|
4446 _timer.start(); |
|
4447 CMSHeap* heap = CMSHeap::heap(); |
|
4448 ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector, |
|
4449 _collector->_span, _collector->ref_processor(), |
|
4450 &(_collector->_markBitMap), |
|
4451 work_queue(worker_id)); |
|
4452 |
|
4453 // Rescan young gen roots first since these are likely |
|
4454 // coarsely partitioned and may, on that account, constitute |
|
4455 // the critical path; thus, it's best to start off that |
|
4456 // work first. |
|
4457 // ---------- young gen roots -------------- |
|
4458 { |
|
4459 work_on_young_gen_roots(&par_mrias_cl); |
|
4460 _timer.stop(); |
|
4461 log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4462 } |
|
4463 |
|
4464 // ---------- remaining roots -------------- |
|
4465 _timer.reset(); |
|
4466 _timer.start(); |
|
4467 heap->cms_process_roots(_strong_roots_scope, |
|
4468 false, // yg was scanned above |
|
4469 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), |
|
4470 _collector->should_unload_classes(), |
|
4471 &par_mrias_cl, |
|
4472 NULL); // The dirty klasses will be handled below |
|
4473 |
|
4474 assert(_collector->should_unload_classes() |
|
4475 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), |
|
4476 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
|
4477 _timer.stop(); |
|
4478 log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4479 |
|
4480 // ---------- unhandled CLD scanning ---------- |
|
4481 if (worker_id == 0) { // Single threaded at the moment. |
|
4482 _timer.reset(); |
|
4483 _timer.start(); |
|
4484 |
|
4485 // Scan all new class loader data objects and new dependencies that were |
|
4486 // introduced during concurrent marking. |
|
4487 ResourceMark rm; |
|
4488 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds(); |
|
4489 for (int i = 0; i < array->length(); i++) { |
|
4490 Devirtualizer::do_cld(&par_mrias_cl, array->at(i)); |
|
4491 } |
|
4492 |
|
4493 // We don't need to keep track of new CLDs anymore. |
|
4494 ClassLoaderDataGraph::remember_new_clds(false); |
|
4495 |
|
4496 _timer.stop(); |
|
4497 log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4498 } |
|
4499 |
|
4500 // We might have added oops to ClassLoaderData::_handles during the |
|
4501 // concurrent marking phase. These oops do not always point to newly allocated objects |
|
4502 // that are guaranteed to be kept alive. Hence, |
|
4503 // we do have to revisit the _handles block during the remark phase. |
|
4504 |
|
4505 // ---------- dirty CLD scanning ---------- |
|
4506 if (worker_id == 0) { // Single threaded at the moment. |
|
4507 _timer.reset(); |
|
4508 _timer.start(); |
|
4509 |
|
4510 // Scan all classes that were dirtied during the concurrent marking phase. |
|
4511 RemarkCLDClosure remark_closure(&par_mrias_cl); |
|
4512 ClassLoaderDataGraph::cld_do(&remark_closure); |
|
4513 |
|
4514 _timer.stop(); |
|
4515 log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4516 } |
|
4517 |
|
4518 |
|
4519 // ---------- rescan dirty cards ------------ |
|
4520 _timer.reset(); |
|
4521 _timer.start(); |
|
4522 |
|
4523 // Do the rescan tasks for each of the two spaces |
|
4524 // (cms_space) in turn. |
|
4525 // "worker_id" is passed to select the task_queue for "worker_id" |
|
4526 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); |
|
4527 _timer.stop(); |
|
4528 log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4529 |
|
4530 // ---------- steal work from other threads ... |
|
4531 // ---------- ... and drain overflow list. |
|
4532 _timer.reset(); |
|
4533 _timer.start(); |
|
4534 do_work_steal(worker_id, &par_mrias_cl); |
|
4535 _timer.stop(); |
|
4536 log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); |
|
4537 } |
|
4538 |
|
4539 void |
|
4540 CMSParMarkTask::do_young_space_rescan( |
|
4541 OopsInGenClosure* cl, ContiguousSpace* space, |
|
4542 HeapWord** chunk_array, size_t chunk_top) { |
|
4543 // Until all tasks completed: |
|
4544 // . claim an unclaimed task |
|
4545 // . compute region boundaries corresponding to task claimed |
|
4546 // using chunk_array |
|
4547 // . par_oop_iterate(cl) over that region |
|
4548 |
|
4549 ResourceMark rm; |
|
4550 HandleMark hm; |
|
4551 |
|
4552 SequentialSubTasksDone* pst = space->par_seq_tasks(); |
|
4553 |
|
4554 uint nth_task = 0; |
|
4555 uint n_tasks = pst->n_tasks(); |
|
4556 |
|
4557 if (n_tasks > 0) { |
|
4558 assert(pst->valid(), "Uninitialized use?"); |
|
4559 HeapWord *start, *end; |
|
4560 while (pst->try_claim_task(/* reference */ nth_task)) { |
|
4561 // We claimed task # nth_task; compute its boundaries. |
|
4562 if (chunk_top == 0) { // no samples were taken |
|
4563 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task"); |
|
4564 start = space->bottom(); |
|
4565 end = space->top(); |
|
4566 } else if (nth_task == 0) { |
|
4567 start = space->bottom(); |
|
4568 end = chunk_array[nth_task]; |
|
4569 } else if (nth_task < (uint)chunk_top) { |
|
4570 assert(nth_task >= 1, "Control point invariant"); |
|
4571 start = chunk_array[nth_task - 1]; |
|
4572 end = chunk_array[nth_task]; |
|
4573 } else { |
|
4574 assert(nth_task == (uint)chunk_top, "Control point invariant"); |
|
4575 start = chunk_array[chunk_top - 1]; |
|
4576 end = space->top(); |
|
4577 } |
|
4578 MemRegion mr(start, end); |
|
4579 // Verify that mr is in space |
|
4580 assert(mr.is_empty() || space->used_region().contains(mr), |
|
4581 "Should be in space"); |
|
4582 // Verify that "start" is an object boundary |
|
4583 assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())), |
|
4584 "Should be an oop"); |
|
4585 space->par_oop_iterate(mr, cl); |
|
4586 } |
|
4587 pst->all_tasks_completed(); |
|
4588 } |
|
4589 } |
|
4590 |
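// Standalone restatement (hypothetical helper) of the boundary rule in
// do_young_space_rescan() above: task n of a space sampled at chunk_top
// boundaries spans [previous boundary, next boundary), where the first
// task starts at the space bottom and the last ends at its top. With
// chunk_top == 0 there is a single task covering [bottom, top).
static void young_task_bounds_sketch(size_t nth_task, size_t chunk_top,
                                     HeapWord** chunk_array,
                                     HeapWord* bottom, HeapWord* top,
                                     HeapWord** start, HeapWord** end) {
  *start = (nth_task == 0) ? bottom : chunk_array[nth_task - 1];
  *end   = (nth_task < chunk_top) ? chunk_array[nth_task] : top;
}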
|
4591 void |
|
4592 CMSParRemarkTask::do_dirty_card_rescan_tasks( |
|
4593 CompactibleFreeListSpace* sp, int i, |
|
4594 ParMarkRefsIntoAndScanClosure* cl) { |
|
4595 // Until all tasks completed: |
|
4596 // . claim an unclaimed task |
|
4597 // . compute region boundaries corresponding to task claimed |
|
4598 // . transfer dirty bits ct->mut for that region |
|
4599 // . apply rescanclosure to dirty mut bits for that region |
|
4600 |
|
4601 ResourceMark rm; |
|
4602 HandleMark hm; |
|
4603 |
|
4604 OopTaskQueue* work_q = work_queue(i); |
|
4605 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); |
|
4606 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! |
|
4607 // CAUTION: This closure has state that persists across calls to |
|
4608 // the work method dirty_range_iterate_clear() in that it has |
|
4609 // embedded in it a (subtype of) UpwardsObjectClosure. The |
|
4610 // use of that state in the embedded UpwardsObjectClosure instance |
|
4611 // assumes that the cards are always iterated (even if in parallel |
|
4612 // by several threads) in monotonically increasing order per each |
|
4613 // thread. This is true of the implementation below which picks |
|
4614 // card ranges (chunks) in monotonically increasing order globally |
|
4615 // and, a-fortiori, in monotonically increasing order per thread |
|
4616 // (the latter order being a subsequence of the former). |
|
4617 // If the work code below is ever reorganized into a more chaotic |
|
4618 // work-partitioning form than the current "sequential tasks" |
|
4619 // paradigm, the use of that persistent state will have to be |
|
4620 // revisited and modified appropriately. See also related |
|
4621 // bug 4756801 work on which should examine this code to make |
|
4622 // sure that the changes there do not run counter to the |
|
4623 // assumptions made here and necessary for correctness and |
|
4624 // efficiency. Note also that this code might yield inefficient |
|
4625 // behavior in the case of very large objects that span one or |
|
4626 // more work chunks. Such objects would potentially be scanned |
|
4627 // several times redundantly. Work on 4756801 should try and |
|
4628 // address that performance anomaly if at all possible. XXX |
|
4629 MemRegion full_span = _collector->_span; |
|
4630 CMSBitMap* bm = &(_collector->_markBitMap); // shared |
|
4631 MarkFromDirtyCardsClosure |
|
4632 greyRescanClosure(_collector, full_span, // entire span of interest |
|
4633 sp, bm, work_q, cl); |
|
4634 |
|
4635 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); |
|
4636 assert(pst->valid(), "Uninitialized use?"); |
|
4637 uint nth_task = 0; |
|
4638 const int alignment = CardTable::card_size * BitsPerWord; |
|
4639 MemRegion span = sp->used_region(); |
|
4640 HeapWord* start_addr = span.start(); |
|
4641 HeapWord* end_addr = align_up(span.end(), alignment); |
|
4642 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units |
|
4643 assert(is_aligned(start_addr, alignment), "Check alignment"); |
|
4644 assert(is_aligned(chunk_size, alignment), "Check alignment"); |
|
4645 |
|
4646 while (pst->try_claim_task(/* reference */ nth_task)) { |
|
4647 // Having claimed the nth_task, compute corresponding mem-region, |
|
4648 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). |
|
4649 // The alignment restriction ensures that we do not need any |
|
4650 // synchronization with other gang-workers while setting or |
|
4651 // clearing bits in this chunk of the MUT. |
|
4652 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, |
|
4653 start_addr + (nth_task+1)*chunk_size); |
|
4654 // The last chunk's end might be way beyond end of the |
|
4655 // used region. In that case pull back appropriately. |
|
4656 if (this_span.end() > end_addr) { |
|
4657 this_span.set_end(end_addr); |
|
4658 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)"); |
|
4659 } |
|
4660 // Iterate over the dirty cards covering this chunk, marking them |
|
4661 // precleaned, and setting the corresponding bits in the mod union |
|
4662 // table. Since we have been careful to partition at Card and MUT-word |
|
4663 // boundaries no synchronization is needed between parallel threads. |
|
4664 _collector->_ct->dirty_card_iterate(this_span, |
|
4665 &modUnionClosure); |
|
4666 |
|
4667 // Having transferred these marks into the modUnionTable, |
|
4668 // rescan the marked objects on the dirty cards in the modUnionTable. |
|
4669 // Even if this is at a synchronous collection, the initial marking |
|
4670 // may have been done during an asynchronous collection so there |
|
4671 // may be dirty bits in the mod-union table. |
|
4672 _collector->_modUnionTable.dirty_range_iterate_clear( |
|
4673 this_span, &greyRescanClosure); |
|
4674 _collector->_modUnionTable.verifyNoOneBitsInRange( |
|
4675 this_span.start(), |
|
4676 this_span.end()); |
|
4677 } |
|
4678 pst->all_tasks_completed(); // declare that i am done |
|
4679 } |
|
4680 |
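// Sketch (hypothetical helper) of the chunk-claiming arithmetic in
// do_dirty_card_rescan_tasks() above: the nth task covers
// [start + n*chunk_size, start + (n+1)*chunk_size), clipped to the
// card/MUT-aligned end of the used region.
static MemRegion rescan_chunk_span_sketch(HeapWord* start_addr,
                                          HeapWord* end_addr,
                                          size_t chunk_size,
                                          size_t nth_task) {
  MemRegion span(start_addr + nth_task * chunk_size,
                 start_addr + (nth_task + 1) * chunk_size);
  if (span.end() > end_addr) {
    span.set_end(end_addr);  // pull the final chunk back to the region end
  }
  return span;
}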
|
4681 // . see if we can share work_queues with ParNew? XXX |
|
4682 void |
|
4683 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) { |
|
4684 OopTaskQueue* work_q = work_queue(i); |
|
4685 NOT_PRODUCT(int num_steals = 0;) |
|
4686 oop obj_to_scan; |
|
4687 CMSBitMap* bm = &(_collector->_markBitMap); |
|
4688 |
|
4689 while (true) { |
|
4690 // Completely finish any left over work from (an) earlier round(s) |
|
4691 cl->trim_queue(0); |
|
4692 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
|
4693 (size_t)ParGCDesiredObjsFromOverflowList); |
|
4694 // Now check if there's any work in the overflow list |
|
4695 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, |
|
4696 // only affects the number of attempts made to get work from the |
|
4697 // overflow list and does not affect the number of workers. Just |
|
4698 // pass ParallelGCThreads so this behavior is unchanged. |
|
4699 if (_collector->par_take_from_overflow_list(num_from_overflow_list, |
|
4700 work_q, |
|
4701 ParallelGCThreads)) { |
|
4702 // found something in global overflow list; |
|
4703 // not yet ready to go stealing work from others. |
|
4704 // We'd like to assert(work_q->size() != 0, ...) |
|
4705 // because we just took work from the overflow list, |
|
4706 // but of course we can't since all of that could have |
|
4707 // been already stolen from us. |
|
4708 // "He giveth and He taketh away." |
|
4709 continue; |
|
4710 } |
|
4711 // Verify that we have no work before we resort to stealing |
|
4712 assert(work_q->size() == 0, "Have work, shouldn't steal"); |
|
4713 // Try to steal from other queues that have work |
|
4714 if (task_queues()->steal(i, /* reference */ obj_to_scan)) { |
|
4715 NOT_PRODUCT(num_steals++;) |
|
4716 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!"); |
|
4717 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); |
|
4718 // Do scanning work |
|
4719 obj_to_scan->oop_iterate(cl); |
|
4720 // Loop around, finish this work, and try to steal some more |
|
4721 } else if (terminator()->offer_termination()) { |
|
4722 break; // nirvana from the infinite cycle |
|
4723 } |
|
4724 } |
|
4725 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals); |
|
4726 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), |
|
4727 "Else our work is not yet done"); |
|
4728 } |
|
4729 |
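// Shape of the work-stealing protocol used by do_work_steal() above,
// restated standalone with hypothetical callbacks: drain local work
// completely, refill from the global overflow list, steal from peers,
// and only offer termination once every source has come up empty.
struct StealOpsSketch {
  void (*drain_local_queue)();
  bool (*take_from_overflow_list)();  // true if anything was transferred
  bool (*steal_and_scan_one)();       // true if an object was stolen
  bool (*offer_termination)();        // true when all workers agree to stop
};

static void work_steal_loop_sketch(const StealOpsSketch& ops) {
  while (true) {
    ops.drain_local_queue();
    if (ops.take_from_overflow_list()) {
      continue;  // process the refilled local queue before stealing
    }
    if (ops.steal_and_scan_one()) {
      continue;  // finish the stolen work, then try to steal more
    }
    if (ops.offer_termination()) {
      break;     // every worker is out of work
    }
  }
}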
|
4730 // Record object boundaries in _eden_chunk_array by sampling the eden |
|
4731 // top in the slow-path eden object allocation code path, |
|
4732 // if CMSEdenChunksRecordAlways is true. If |
|
4733 // CMSEdenChunksRecordAlways is false, we use the other asynchronous |
|
4734 // sampling in sample_eden() that activates during part of the |
|
4735 // preclean phase. |
|
4736 void CMSCollector::sample_eden_chunk() { |
|
4737 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) { |
|
4738 if (_eden_chunk_lock->try_lock()) { |
|
4739 // Record a sample. This is the critical section. The contents |
|
4740 // of the _eden_chunk_array have to be non-decreasing in the |
|
4741 // address order. |
|
4742 _eden_chunk_array[_eden_chunk_index] = *_top_addr; |
|
4743 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, |
|
4744 "Unexpected state of Eden"); |
|
4745 if (_eden_chunk_index == 0 || |
|
4746 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) && |
|
4747 (pointer_delta(_eden_chunk_array[_eden_chunk_index], |
|
4748 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) { |
|
4749 _eden_chunk_index++; // commit sample |
|
4750 } |
|
4751 _eden_chunk_lock->unlock(); |
|
4752 } |
|
4753 } |
|
4754 } |
|
4755 |
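// Restatement (hypothetical helper) of the commit rule in
// sample_eden_chunk() above: a new eden top sample is committed only if
// it is the first one, or if it strictly increases the previous boundary
// by at least CMSSamplingGrain words. This keeps the chunk array sorted
// and keeps the resulting rescan chunks usefully coarse.
static bool should_commit_sample_sketch(HeapWord** chunk_array, size_t index) {
  return index == 0 ||
         (chunk_array[index] > chunk_array[index - 1] &&
          pointer_delta(chunk_array[index], chunk_array[index - 1])
            >= CMSSamplingGrain);
}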
|
4756 // Return a thread-local PLAB recording array, as appropriate. |
|
4757 void* CMSCollector::get_data_recorder(int thr_num) { |
|
4758 if (_survivor_plab_array != NULL && |
|
4759 (CMSPLABRecordAlways || |
|
4760 (_collectorState > Marking && _collectorState < FinalMarking))) { |
|
4761 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); |
|
4762 ChunkArray* ca = &_survivor_plab_array[thr_num]; |
|
4763 ca->reset(); // clear it so that fresh data is recorded |
|
4764 return (void*) ca; |
|
4765 } else { |
|
4766 return NULL; |
|
4767 } |
|
4768 } |
|
4769 |
|
4770 // Reset all the thread-local PLAB recording arrays |
|
4771 void CMSCollector::reset_survivor_plab_arrays() { |
|
4772 for (uint i = 0; i < ParallelGCThreads; i++) { |
|
4773 _survivor_plab_array[i].reset(); |
|
4774 } |
|
4775 } |
|
4776 |
|
4777 // Merge the per-thread plab arrays into the global survivor chunk |
|
4778 // array which will provide the partitioning of the survivor space |
|
4779 // for CMS initial scan and rescan. |
|
4780 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv, |
|
4781 int no_of_gc_threads) { |
|
4782 assert(_survivor_plab_array != NULL, "Error"); |
|
4783 assert(_survivor_chunk_array != NULL, "Error"); |
|
4784 assert(_collectorState == FinalMarking || |
|
4785 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error"); |
|
4786 for (int j = 0; j < no_of_gc_threads; j++) { |
|
4787 _cursor[j] = 0; |
|
4788 } |
|
4789 HeapWord* top = surv->top(); |
|
4790 size_t i; |
|
4791 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries |
|
4792 HeapWord* min_val = top; // Higher than any PLAB address |
|
4793 uint min_tid = 0; // position of min_val this round |
|
4794 for (int j = 0; j < no_of_gc_threads; j++) { |
|
4795 ChunkArray* cur_sca = &_survivor_plab_array[j]; |
|
4796 if (_cursor[j] == cur_sca->end()) { |
|
4797 continue; |
|
4798 } |
|
4799 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); |
|
4800 HeapWord* cur_val = cur_sca->nth(_cursor[j]); |
|
4801 assert(surv->used_region().contains(cur_val), "Out of bounds value"); |
|
4802 if (cur_val < min_val) { |
|
4803 min_tid = j; |
|
4804 min_val = cur_val; |
|
4805 } else { |
|
4806 assert(cur_val < top, "All recorded addresses should be less"); |
|
4807 } |
|
4808 } |
|
4809 // At this point min_val and min_tid are respectively |
|
4810 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) |
|
4811 // and the thread (j) that witnesses that address. |
|
4812 // We record this address in the _survivor_chunk_array[i] |
|
4813 // and increment _cursor[min_tid] prior to the next round i. |
|
4814 if (min_val == top) { |
|
4815 break; |
|
4816 } |
|
4817 _survivor_chunk_array[i] = min_val; |
|
4818 _cursor[min_tid]++; |
|
4819 } |
|
4820 // We are all done; record the size of the _survivor_chunk_array |
|
4821 _survivor_chunk_index = i; // exclusive: [0, i) |
|
4822 log_trace(gc, survivor)(" (Survivor: " SIZE_FORMAT " chunks) ", i); |
|
4823 // Verify that we used up all the recorded entries |
|
4824 #ifdef ASSERT |
|
4825 size_t total = 0; |
|
4826 for (int j = 0; j < no_of_gc_threads; j++) { |
|
4827 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); |
|
4828 total += _cursor[j]; |
|
4829 } |
|
4830 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); |
|
4831 // Check that the merged array is in sorted order |
|
4832 if (total > 0) { |
|
4833 for (size_t i = 0; i < total - 1; i++) { |
|
4834 log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", |
|
4835 i, p2i(_survivor_chunk_array[i])); |
|
4836 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], |
|
4837 "Not sorted"); |
|
4838 } |
|
4839 } |
|
4840 #endif // ASSERT |
|
4841 } |
|
4842 |
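// The loop above is a k-way merge: each per-thread PLAB array is already
// sorted by address, and every round consumes the globally smallest
// un-consumed sample. Standalone sketch with hypothetical parameters
// (k is bounded by the fixed-size cursor array of the sketch):
static size_t k_way_merge_sketch(HeapWord** arrays[], const size_t* lens,
                                 int k, HeapWord** merged) {
  size_t cursor[64] = {0};  // per-input read positions; assumes k <= 64
  size_t out = 0;
  while (true) {
    int min_t = -1;
    for (int j = 0; j < k; j++) {
      if (cursor[j] < lens[j] &&
          (min_t < 0 ||
           arrays[j][cursor[j]] < arrays[min_t][cursor[min_t]])) {
        min_t = j;  // new smallest candidate this round
      }
    }
    if (min_t < 0) break;  // all per-thread inputs consumed
    merged[out++] = arrays[min_t][cursor[min_t]++];
  }
  return out;  // number of entries in the merged, sorted output
}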
|
4843 // Set up the space's par_seq_tasks structure for work claiming |
|
4844 // for parallel initial scan and rescan of young gen. |
|
4845 // See ParRescanTask where this is currently used. |
|
4846 void |
|
4847 CMSCollector:: |
|
4848 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { |
|
4849 assert(n_threads > 0, "Unexpected n_threads argument"); |
|
4850 |
|
4851 // Eden space |
|
4852 if (!_young_gen->eden()->is_empty()) { |
|
4853 SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks(); |
|
4854 assert(!pst->valid(), "Clobbering existing data?"); |
|
4855 // Each valid entry in [0, _eden_chunk_index) represents a task. |
|
4856 size_t n_tasks = _eden_chunk_index + 1; |
|
4857 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); |
|
4858 // Sets the condition for completion of the subtask (how many threads |
|
4859 // need to finish in order to be done). |
|
4860 pst->set_n_threads(n_threads); |
|
4861 pst->set_n_tasks((int)n_tasks); |
|
4862 } |
|
4863 |
|
4864 // Merge the survivor plab arrays into _survivor_chunk_array |
|
4865 if (_survivor_plab_array != NULL) { |
|
4866 merge_survivor_plab_arrays(_young_gen->from(), n_threads); |
|
4867 } else { |
|
4868 assert(_survivor_chunk_index == 0, "Error"); |
|
4869 } |
|
4870 |
|
4871 // To space |
|
4872 { |
|
4873 SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks(); |
|
4874 assert(!pst->valid(), "Clobbering existing data?"); |
|
4875 // Sets the condition for completion of the subtask (how many threads |
|
4876 // need to finish in order to be done). |
|
4877 pst->set_n_threads(n_threads); |
|
4878 pst->set_n_tasks(1); |
|
4879 assert(pst->valid(), "Error"); |
|
4880 } |
|
4881 |
|
4882 // From space |
|
4883 { |
|
4884 SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks(); |
|
4885 assert(!pst->valid(), "Clobbering existing data?"); |
|
4886 size_t n_tasks = _survivor_chunk_index + 1; |
|
4887 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); |
|
4888 // Sets the condition for completion of the subtask (how many threads |
|
4889 // need to finish in order to be done). |
|
4890 pst->set_n_threads(n_threads); |
|
4891 pst->set_n_tasks((int)n_tasks); |
|
4892 assert(pst->valid(), "Error"); |
|
4893 } |
|
4894 } |
|
4895 |
|
4896 // Parallel version of remark |
|
4897 void CMSCollector::do_remark_parallel() { |
|
4898 CMSHeap* heap = CMSHeap::heap(); |
|
4899 WorkGang* workers = heap->workers(); |
|
4900 assert(workers != NULL, "Need parallel worker threads."); |
|
4901 // Choose to use the number of GC workers most recently set |
|
4902 // into "active_workers". |
|
4903 uint n_workers = workers->active_workers(); |
|
4904 |
|
4905 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); |
|
4906 |
|
4907 StrongRootsScope srs(n_workers); |
|
4908 |
|
4909 CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs); |
|
4910 |
|
4911 // We won't be iterating over the cards in the card table updating |
|
4912 // the younger_gen cards, so we shouldn't call the following else |
|
4913 // the verification code as well as subsequent younger_refs_iterate |
|
4914 // code would get confused. XXX |
|
4915 // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel |
|
4916 |
|
4917 // The young gen rescan work will not be done as part of |
|
4918 // process_roots (which currently doesn't know how to |
|
4919 // parallelize such a scan), but rather will be broken up into |
|
4920 // a set of parallel tasks (via the sampling that the [abortable] |
|
4921 // preclean phase did of eden, plus the [two] tasks of |
|
4922 // scanning the [two] survivor spaces). Further fine-grain |
|
4923 // parallelization of the scanning of the survivor spaces |
|
4924 // themselves, and of precleaning of the young gen itself |
|
4925 // is deferred to the future. |
|
4926 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); |
|
4927 |
|
4928 // The dirty card rescan work is broken up into a "sequence" |
|
4929 // of parallel tasks (per constituent space) that are dynamically |
|
4930 // claimed by the parallel threads. |
|
4931 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); |
|
4932 |
|
4933 // It turns out that even when we're using 1 thread, doing the work in a |
|
4934 // separate thread causes wide variance in run times. We can't help this |
|
4935 // in the multi-threaded case, but we special-case n=1 here to get |
|
4936 // repeatable measurements of the 1-thread overhead of the parallel code. |
|
4937 if (n_workers > 1) { |
|
4938 // Make refs discovery MT-safe, if it isn't already: it may not |
|
4939 // necessarily be so, since it's possible that we are doing |
|
4940 // ST marking. |
|
4941 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); |
|
4942 workers->run_task(&tsk); |
|
4943 } else { |
|
4944 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); |
|
4945 tsk.work(0); |
|
4946 } |
|
4947 |
|
4948 // restore, single-threaded for now, any preserved marks |
|
4949 // as a result of work_q overflow |
|
4950 restore_preserved_marks_if_any(); |
|
4951 } |
|
4952 |
|
4953 // Non-parallel version of remark |
|
4954 void CMSCollector::do_remark_non_parallel() { |
|
4955 ResourceMark rm; |
|
4956 HandleMark hm; |
|
4957 CMSHeap* heap = CMSHeap::heap(); |
|
4958 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); |
|
4959 |
|
4960 MarkRefsIntoAndScanClosure |
|
4961 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */, |
|
4962 &_markStack, this, |
|
4963 false /* should_yield */, false /* not precleaning */); |
|
4964 MarkFromDirtyCardsClosure |
|
4965 markFromDirtyCardsClosure(this, _span, |
|
4966 NULL, // space is set further below |
|
4967 &_markBitMap, &_markStack, &mrias_cl); |
|
4968 { |
|
4969 GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm); |
|
4970 // Iterate over the dirty cards, setting the corresponding bits in the |
|
4971 // mod union table. |
|
4972 { |
|
4973 ModUnionClosure modUnionClosure(&_modUnionTable); |
|
4974 _ct->dirty_card_iterate(_cmsGen->used_region(), |
|
4975 &modUnionClosure); |
|
4976 } |
|
4977 // Having transferred these marks into the modUnionTable, we just need |
|
4978 // to rescan the marked objects on the dirty cards in the modUnionTable. |
|
4979 // The initial marking may have been done during an asynchronous |
|
4980 // collection so there may be dirty bits in the mod-union table. |
|
4981 const int alignment = CardTable::card_size * BitsPerWord; |
|
4982 { |
|
4983 // ... First handle dirty cards in CMS gen |
|
4984 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); |
|
4985 MemRegion ur = _cmsGen->used_region(); |
|
4986 HeapWord* lb = ur.start(); |
|
4987 HeapWord* ub = align_up(ur.end(), alignment); |
|
4988 MemRegion cms_span(lb, ub); |
|
4989 _modUnionTable.dirty_range_iterate_clear(cms_span, |
|
4990 &markFromDirtyCardsClosure); |
|
4991 verify_work_stacks_empty(); |
|
4992 log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards()); |
|
4993 } |
|
4994 } |
|
4995 if (VerifyDuringGC && |
|
4996 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) { |
|
4997 HandleMark hm; // Discard invalid handles created during verification |
|
4998 Universe::verify(); |
|
4999 } |
|
5000 { |
|
5001 GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm); |
|
5002 |
|
5003 verify_work_stacks_empty(); |
|
5004 |
|
5005 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
|
5006 StrongRootsScope srs(1); |
|
5007 |
|
5008 heap->cms_process_roots(&srs, |
|
5009 true, // young gen as roots |
|
5010 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
|
5011 should_unload_classes(), |
|
5012 &mrias_cl, |
|
5013 NULL); // The dirty klasses will be handled below |
|
5014 |
|
5015 assert(should_unload_classes() |
|
5016 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), |
|
5017 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
|
5018 } |
|
5019 |
|
5020 { |
|
5021 GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm); |
|
5022 |
|
5023 verify_work_stacks_empty(); |
|
5024 |
|
5025 // Scan all class loader data objects that might have been introduced |
|
5026 // during concurrent marking. |
|
5027 ResourceMark rm; |
|
5028 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds(); |
|
5029 for (int i = 0; i < array->length(); i++) { |
|
5030 Devirtualizer::do_cld(&mrias_cl, array->at(i)); |
|
5031 } |
|
5032 |
|
5033 // We don't need to keep track of new CLDs anymore. |
|
5034 ClassLoaderDataGraph::remember_new_clds(false); |
|
5035 |
|
5036 verify_work_stacks_empty(); |
|
5037 } |
|
5038 |
|
5039 // We might have added oops to ClassLoaderData::_handles during the |
|
5040 // concurrent marking phase. These oops do not always point to newly allocated objects |
|
5041 // that are guaranteed to be kept alive. Hence, |
|
5042 // we do have to revisit the _handles block during the remark phase. |
|
5043 { |
|
5044 GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm); |
|
5045 |
|
5046 verify_work_stacks_empty(); |
|
5047 |
|
5048 RemarkCLDClosure remark_closure(&mrias_cl); |
|
5049 ClassLoaderDataGraph::cld_do(&remark_closure); |
|
5050 |
|
5051 verify_work_stacks_empty(); |
|
5052 } |
|
5053 |
|
5054 verify_work_stacks_empty(); |
|
5055 // Restore evacuated mark words, if any, used for overflow list links |
|
5056 restore_preserved_marks_if_any(); |
|
5057 |
|
5058 verify_overflow_empty(); |
|
5059 } |
|
5060 |
|
5061 //////////////////////////////////////////////////////// |
|
5062 // Parallel Reference Processing Task Proxy Class |
|
5063 //////////////////////////////////////////////////////// |
|
5064 class AbstractGangTaskWOopQueues : public AbstractGangTask { |
|
5065 OopTaskQueueSet* _queues; |
|
5066 TaskTerminator _terminator; |
|
5067 public: |
|
5068 AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) : |
|
5069 AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {} |
|
5070 ParallelTaskTerminator* terminator() { return _terminator.terminator(); } |
|
5071 OopTaskQueueSet* queues() { return _queues; } |
|
5072 }; |
|
class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  CMSCollector*   _collector;
  CMSBitMap*      _mark_bit_map;
  const MemRegion _span;
  ProcessTask&    _task;

public:
  CMSRefProcTaskProxy(ProcessTask& task,
                      CMSCollector* collector,
                      const MemRegion& span,
                      CMSBitMap* mark_bit_map,
                      AbstractWorkGang* workers,
                      OopTaskQueueSet* task_queues):
    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
                               task_queues,
                               workers->active_workers()),
    _collector(collector),
    _mark_bit_map(mark_bit_map),
    _span(span),
    _task(task)
  {
    assert(_collector->_span.equals(_span) && !_span.is_empty(),
           "Inconsistency in _span");
  }

  OopTaskQueueSet* task_queues() { return queues(); }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  void do_work_steal(int i,
                     CMSParDrainMarkingStackClosure* drain,
                     CMSParKeepAliveClosure* keep_alive);

  virtual void work(uint worker_id);
};

void CMSRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  assert(_collector->_span.equals(_span), "Inconsistency in _span");
  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                        _mark_bit_map,
                                        work_queue(worker_id));
  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
                                                 _mark_bit_map,
                                                 work_queue(worker_id));
  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
  if (_task.marks_oops_alive()) {
    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
  }
  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}

CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
  _span(span),
  _work_queue(work_queue),
  _bit_map(bit_map),
  _mark_and_push(collector, span, bit_map, work_queue),
  _low_water_mark(MIN2((work_queue->max_elems()/4),
                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }

// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
                                        CMSParDrainMarkingStackClosure* drain,
                                        CMSParKeepAliveClosure* keep_alive) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;

  while (true) {
    // Completely finish any left-over work from (an) earlier round(s)
    drain->trim_queue(0);
    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                         (size_t)ParGCDesiredObjsFromOverflowList);
    // Now check if there's any work in the overflow list.
    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
    // only affects the number of attempts made to get work from the
    // overflow list and does not affect the number of workers. Just
    // pass ParallelGCThreads so this behavior is unchanged.
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q,
                                                ParallelGCThreads)) {
      // Found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't, since all of that might have
      // been already stolen from us.
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(keep_alive);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
}
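
// Note on the loop above: each iteration (1) completely drains the local
// queue, (2) tries to refill it from the global overflow list, taking at
// most a quarter of the queue's free capacity -- e.g. at most 4096 oops for
// an empty queue with max_elems of 16384 -- further capped by
// ParGCDesiredObjsFromOverflowList, and only then (3) tries to steal from
// sibling queues, exiting once the terminator reaches consensus that all
// queues are empty.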
|
void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
  CMSHeap* heap = CMSHeap::heap();
  WorkGang* workers = heap->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  assert(workers->active_workers() == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, workers->active_workers());
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor_span(),
                              _collector.markBitMap(),
                              workers, _collector.task_queues());
  workers->run_task(&rp_task, workers->active_workers());
}

void CMSCollector::refProcessingWork() {
  ResourceMark rm;
  HandleMark hm;

  ReferenceProcessor* rp = ref_processor();
  assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(false);
  verify_work_stacks_empty();

  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

    // Set up the keep_alive and complete closures.
    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                            &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                  _span, &_markBitMap, &_markStack,
                                  &cmsKeepAliveClosure, false /* !preclean */);

    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery was done MT, there
      // may have been a different number of threads doing the discovery
      // and a different number of discovered lists may have Ref objects.
      // That is OK as long as the Reference lists are balanced (see
      // balance_all_queues() and balance_queues()).
      CMSHeap* heap = CMSHeap::heap();
      uint active_workers = ParallelGCThreads;
      WorkGang* workers = heap->workers();
      if (workers != NULL) {
        active_workers = workers->active_workers();
        // The expectation is that active_workers will have already
        // been set to a reasonable value. If it has not been set,
        // investigate.
        assert(active_workers > 0, "Should have been set during scavenge");
      }
      rp->set_active_mt_degree(active_workers);
      CMSRefProcTaskExecutor task_executor(*this);
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                &task_executor,
                                                &pt);
    } else {
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                NULL,
                                                &pt);
    }
    _gc_tracer_cm->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  {
    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
    WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
  }

  if (should_unload_classes()) {
    {
      GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);

      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);

      // Unload nmethods.
      CodeCache::do_unloading(&_is_alive_closure, purged_class);

      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(purged_class);

      // Clean JVMCI metadata handles.
      JVMCI_ONLY(JVMCI::do_unloading(purged_class));
    }
  }

  // Restore any preserved marks as a result of mark stack or
  // work queue overflow.
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  rp->verify_no_references_recorded();
}

#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
  Thread* t = Thread::current();
  // Only the VM thread or the CMS thread should be here.
  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
         "Unexpected thread type");
  // If this is the VM thread, the foreground process
  // should not be waiting. Note that _foregroundGCIsActive is
  // true while the foreground collector is waiting.
  if (_foregroundGCShouldWait) {
    // We cannot be the VM thread
    assert(t->is_ConcurrentGC_thread(),
           "Should be CMS thread");
  } else {
    // We can be the CMS thread only if we are in a stop-world
    // phase of CMS collection.
    if (t->is_ConcurrentGC_thread()) {
      assert(_collectorState == InitialMarking ||
             _collectorState == FinalMarking,
             "Should be a stop-world phase");
      // The CMS thread should be holding the CMS_token.
      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
             "Potential interference with concurrently "
             "executing VM thread");
    }
  }
}
#endif

void CMSCollector::sweep() {
  assert(_collectorState == Sweeping, "just checking");
  check_correct_thread_executing();
  verify_work_stacks_empty();
  verify_overflow_empty();
  increment_sweep_count();
  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());

  _inter_sweep_timer.stop();
  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());

  assert(!_intra_sweep_timer.is_active(), "Should not be active");
  _intra_sweep_timer.reset();
  _intra_sweep_timer.start();
  {
    GCTraceCPUTime tcpu;
    CMSPhaseAccounting pa(this, "Concurrent Sweep");
    // First sweep the old gen
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               bitMapLock());
      sweepWork(_cmsGen);
    }

    // Update Universe::_heap_*_at_gc figures.
    // We need all the free list locks to make the abstract state
    // transition from Sweeping to Resetting. See detailed note
    // further below.
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());

      // Update heap occupancy information, which is used as
      // input to the soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();

      // Recalculate CMS used space after the CMS collection.
      _cmsGen->cmsSpace()->recalculate_used_stable();

      _collectorState = Resizing;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (should_unload_classes()) {
    // Delay purge to the beginning of the next safepoint. Metaspace::contains
    // requires that the virtual spaces are stable and not deleted.
    ClassLoaderDataGraph::set_should_purge(true);
  }

  _intra_sweep_timer.stop();
  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());

  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();
|
  // We need a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity, so derive the time from os::javaTimeNanos() instead.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);
|
  // NOTE on abstract state transitions:
  // Mutators allocate-live and/or mark the mod-union table dirty
  // based on the state of the collection. The former is done in
  // the interval [Marking, Sweeping] and the latter in the interval
  // [Marking, Sweeping). Thus the transitions into the Marking state
  // and out of the Sweeping state must be synchronously visible
  // globally to the mutators.
  // The transition into the Marking state happens with the world
  // stopped so the mutators will globally see it. Sweeping is
  // done asynchronously by the background collector so the transition
  // from the Sweeping state to the Resizing state must be done
  // under the freelistLock (as is the check for whether to
  // allocate-live and whether to dirty the mod-union table).
  assert(_collectorState == Resizing, "Change of collector state to"
         " Resizing must be done under the freelistLocks (plural)");

  // Now that sweeping has been completed, we clear
  // the incremental_collection_failed flag,
  // thus inviting a younger gen collection to promote into
  // this generation. If such a promotion may still fail,
  // the flag will be set again when a young collection is
  // attempted.
  CMSHeap* heap = CMSHeap::heap();
  heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
  heap->update_full_collections_completed(_collection_count_start);
}
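
// Note: the inter/intra sweep timers sampled above feed the padded averages
// that sweepWork() hands to beginSweepFLCensus() below; they give the free
// list space an estimate of how long the collector typically spends between
// and within sweeps, as input to its per-size free list census.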
|
// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
  HeapWord* minAddr = _cmsSpace->bottom();
  HeapWord* largestAddr =
    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
  if (largestAddr == NULL) {
    // The dictionary appears to be empty. In this case,
    // try to coalesce at the end of the heap.
    largestAddr = _cmsSpace->end();
  }
  size_t largestOffset = pointer_delta(largestAddr, minAddr);
  size_t nearLargestOffset =
    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}
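
// Worked example (illustrative numbers only): if the largest free block
// begins 100,000,000 HeapWords past bottom() and nearLargestPercent is
// 0.99, the hint is placed (99,000,000 - MinChunkSize) words past bottom(),
// i.e. just below the largest block, biasing the sweeper toward coalescing
// free blocks in its vicinity.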
|
bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  return addr >= _cmsSpace->nearLargestChunk();
}

FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  return _cmsSpace->find_chunk_at_end();
}

void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
                                                    bool full) {
  // If the young generation has been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Gather statistics on the young generation collection.
    collector()->stats().record_gc0_end(used());
  }
  _cmsSpace->recalculate_used_stable();
}
|
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
  // We iterate over the space(s) underlying this generation,
  // checking the mark bit map to see if the bits corresponding
  // to specific blocks are marked or not. Blocks that are
  // marked are live and are not swept up. All remaining blocks
  // are swept up, with coalescing on-the-fly as we sweep up
  // contiguous free and/or garbage blocks.
  // We need to ensure that the sweeper synchronizes with allocators
  // and stop-the-world collectors. In particular, the following
  // locks are used:
  // . CMS token: if this is held, a stop-the-world collection cannot occur
  // . freelistLock: if this is held, no allocation can occur from this
  //   generation by another thread
  // . bitMapLock: if this is held, no other thread can access or update
  //   the mark bit map

  // Note that we need to hold the freelistLock if we use
  // block iterate below; else the iterator might go awry if
  // a mutator (or promotion) causes block contents to change
  // (for instance if the allocator divvies up a block).
  // If we hold the free list lock, for all practical purposes
  // young generation GC's can't occur (they'll usually need to
  // promote), so we might as well prevent all young generation
  // GC's while we do a sweeping step. For the same reason, we might
  // as well take the bit map lock for the entire duration.

  // Check that we hold the requisite locks.
  assert(have_cms_token(), "Should hold cms token");
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
  assert_lock_strong(old_gen->freelistLock());
  assert_lock_strong(bitMapLock());

  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                          _inter_sweep_estimate.padded_average(),
                                          _intra_sweep_estimate.padded_average());
  old_gen->setNearLargestChunk();

  {
    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
    // We need to free up/coalesce garbage/blocks from a
    // co-terminal free run. This is done in the SweepClosure
    // destructor; so, do not remove this scope, else the
    // end-of-sweep-census below will be off by a little bit.
  }
  old_gen->cmsSpace()->sweep_completed();
  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
  if (should_unload_classes()) {                // unloaded classes this cycle,
    _concurrent_cycles_since_last_unload = 0;   // ... reset count
  } else {                                      // did not unload classes,
    _concurrent_cycles_since_last_unload++;     // ... increment count
  }
}
|
// Reset CMS data structures (for now just the marking bit map)
// in preparation for the next cycle.
void CMSCollector::reset_concurrent() {
  CMSTokenSyncWithLocks ts(true, bitMapLock());

  // If the state is not "Resetting", the foreground thread
  // has done a collection and the resetting.
  if (_collectorState != Resetting) {
    assert(_collectorState == Idling, "The state should only change"
           " because the foreground collector has finished the collection");
    return;
  }

  {
    // Clear the mark bitmap (no grey objects to start with)
    // for the next cycle.
    GCTraceCPUTime tcpu;
    CMSPhaseAccounting cmspa(this, "Concurrent Reset");

    HeapWord* curAddr = _markBitMap.startWord();
    while (curAddr < _markBitMap.endWord()) {
      size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
      _markBitMap.clear_large_range(chunk);
      if (ConcurrentMarkSweepThread::should_yield() &&
          !foregroundGCIsActive() &&
          CMSYield) {
        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
               "CMS thread should hold CMS token");
        assert_lock_strong(bitMapLock());
        bitMapLock()->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        stopTimer();
        incrementYields();

        // See the comment in coordinator_yield()
        for (unsigned i = 0; i < CMSYieldSleepCount &&
                             ConcurrentMarkSweepThread::should_yield() &&
                             !CMSCollector::foregroundGCIsActive(); ++i) {
          os::naked_short_sleep(1);
        }

        ConcurrentMarkSweepThread::synchronize(true);
        bitMapLock()->lock_without_safepoint_check();
        startTimer();
      }
      curAddr = chunk.end();
    }
    // A successful mostly concurrent collection has been done.
    // Because only the full (i.e., concurrent mode failure) collections
    // are being measured for gc overhead limits, clean the "near" flag
    // and count.
    size_policy()->reset_gc_overhead_limit_count();
    _collectorState = Idling;
  }

  register_gc_end();
}
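
// Note on the loop above: the bit map is cleared in CMSBitMapYieldQuantum-
// sized chunks precisely so that the CMS thread can drop bitMapLock and
// yield to a foreground (stop-the-world) collection between chunks;
// clearing the entire map under a single lock hold could otherwise delay
// a pending pause for the whole duration of the clear.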
|
// Same as above but for STW paths
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  GCIdMark gc_id_mark(_cmsThread->gc_id());
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}

void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  GCTraceCPUTime tcpu;
  TraceCollectorStats tcs_cgc(cgc_counters());

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
      checkpointRootsInitial();
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
      checkpointRootsFinal();
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}

#ifndef PRODUCT
size_t const CMSCollector::skip_header_HeapWords() {
  return FreeChunk::header_size();
}

// Try to collect here the conditions that should hold when
// the CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
void CMSCollector::verify_ok_to_terminate() const {
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "should be called by CMS thread");
  assert(!_foregroundGCShouldWait, "should be false");
  // We could check here that all the various low-level locks
  // are not held by the CMS thread, but that is overkill; see
  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  // is checked.
}
#endif

size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  size_t size = pointer_delta(nextOneAddr + 1, addr);
  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
         "alignment problem");
  assert(size >= 3, "Necessary for Printezis marks to work");
  return size;
}

// A variant of the above (block_size_using_printezis_bits()), except
// that we return 0 if the P-bits are not yet set.
size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  if (_markBitMap.isMarked(addr + 1)) {
    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
    size_t size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");
    assert(size >= 3, "Necessary for Printezis marks to work");
    return size;
  }
  return 0;
}
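
// The Printezis-mark convention decoded by the two routines above: for an
// allocated but not-yet-initialized object at addr, the marker sets a bit
// at addr + 1 (the "P-bit") and a bit at the block's last word, so the
// block size can be recovered as pointer_delta(next set bit at or after
// addr + 2, addr) + 1 without reading the not-yet-published klass pointer.
// This is also why the routines assert size >= 3: the scheme needs the
// bits at addr, addr + 1 and the last word to be distinct.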
|
HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  size_t sz = 0;
  oop p = (oop)addr;
  if (p->klass_or_null_acquire() != NULL) {
    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  } else {
    sz = block_size_using_printezis_bits(addr);
  }
  assert(sz > 0, "size must be nonzero");
  HeapWord* next_block = addr + sz;
  HeapWord* next_card = align_up(next_block, CardTable::card_size);
  assert(align_down((uintptr_t)addr, CardTable::card_size) <
         align_down((uintptr_t)next_card, CardTable::card_size),
         "must be different cards");
  return next_card;
}


// CMS Bit Map Wrapper /////////////////////////////////////////

// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate() further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  _shifter(shifter),
  _bm(),
  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
                                    Monitor::_safepoint_check_never) : NULL)
{
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

bool CMSBitMap::allocate(MemRegion mr) {
  _bmStartWord = mr.start();
  _bmWordSize  = mr.word_size();
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    log_warning(gc)("CMS bit map allocation failure");
    return false;
  }
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    log_warning(gc)("CMS bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of CMS bit map?");
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);

  // bm.clear(); // can we rely on getting zero'd memory? verify below
  assert(isAllClear(),
         "Expected zero'd memory from ReservedSpace constructor");
  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
         "consistency check");
  return true;
}
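
// Sizing arithmetic above, worked through (illustrative numbers only): one
// bit covers (1 << _shifter) HeapWords, so the map needs
// (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 bytes -- e.g. with a
// shifter of 0, a region of 2^30 words needs 2^27 + 1 bytes (128M plus one
// byte of slack) -- which is then rounded up to an allocation-aligned
// ReservedSpace.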
|
void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  HeapWord *next_addr, *end_addr, *last_addr;
  assert_locked();
  assert(covers(mr), "out-of-range error");
  // XXX assert that start and end are appropriately aligned
  for (next_addr = mr.start(), end_addr = mr.end();
       next_addr < end_addr; next_addr = last_addr) {
    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
    last_addr = dirty_region.end();
    if (!dirty_region.is_empty()) {
      cl->do_MemRegion(dirty_region);
    } else {
      assert(last_addr == end_addr, "program logic");
      return;
    }
  }
}

void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

#ifndef PRODUCT
void CMSBitMap::assert_locked() const {
  CMSLockVerifier::assert_locked(lock());
}

bool CMSBitMap::covers(MemRegion mr) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
         "size inconsistency");
  return (mr.start() >= _bmStartWord) &&
         (mr.end()   <= endWord());
}

bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  return (start >= _bmStartWord && (start + size) <= endWord());
}

void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  // Verify that there are no 1 bits in the interval [left, right).
  FalseBitMapClosure falseBitMapClosure;
  iterate(&falseBitMapClosure, left, right);
}

void CMSBitMap::region_invariant(MemRegion mr)
{
  assert_locked();
  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert(covers(mr), "mr should be covered by bit map");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  // Make sure that end() is appropriately aligned
  assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
         "Misaligned mr.end()");
  size_t end_ofs = heapWordToOffset(mr.end());
  assert(end_ofs > start_ofs, "Should mark at least one bit");
}

#endif

bool CMSMarkStack::allocate(size_t size) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                   size * sizeof(oop)));
  if (!rs.is_reserved()) {
    log_warning(gc)("CMSMarkStack allocation failure");
    return false;
  }
  if (!_virtual_space.initialize(rs, rs.size())) {
    log_warning(gc)("CMSMarkStack backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "didn't reserve backing store for all of CMS stack?");
  _base = (oop*)(_virtual_space.low());
  _index = 0;
  _capacity = size;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
void CMSMarkStack::expand() {
  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
  if (_capacity == MarkStackSizeMax) {
    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
      // We print a warning message only once per CMS cycle.
      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
    }
    return;
  }
  // Double capacity if possible
  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                   new_capacity * sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack.
    _virtual_space.release();
    // Reinitialize virtual space for the new stack.
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
    // Failed to double capacity; continue.
    // We print a detail message only once per CMS cycle.
    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}
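
// Note: expand() releases the old backing store and resets _index to 0
// rather than copying the contents across, so it is only safe to call at a
// point where the stack has already been drained (or its contents handed
// off to the overflow machinery); callers appear to rely on exactly that.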
|
// Closures
// XXX: there seems to be a lot of code duplication here;
// should refactor and consolidate common code.

// This closure is used to mark refs into the CMS generation in
// the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not an oldest
// generation then this will lose younger_gen cards!

MarkRefsIntoClosure::MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
  assert(ref_discoverer() == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void MarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark the corresponding bit in _markBitMap
  assert(oopDesc::is_oop(obj), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->mark(addr);
  }
}

ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
  assert(ref_discoverer() == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void ParMarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark the corresponding bit in _markBitMap
  assert(oopDesc::is_oop(obj), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->par_mark(addr);
  }
}

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
    _span(span),
    _verification_bm(verification_bm),
    _cms_bm(cms_bm)
{
  assert(ref_discoverer() == NULL, "deliberately left NULL");
  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}

void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if p points into _span, then mark the corresponding bit in _markBitMap
  assert(oopDesc::is_oop(obj), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    _verification_bm->mark(addr);
    if (!_cms_bm->isMarked(addr)) {
      Log(gc, verify) log;
      ResourceMark rm;
      LogStream ls(log.error());
      oop(addr)->print_on(&ls);
      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
      fatal("... aborting");
    }
  }
}

//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////

MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
                                                       ReferenceDiscoverer* rd,
                                                       CMSBitMap* bit_map,
                                                       CMSBitMap* mod_union_table,
                                                       CMSMarkStack* mark_stack,
                                                       CMSCollector* collector,
                                                       bool should_yield,
                                                       bool concurrent_precleaning):
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
                      mark_stack, concurrent_precleaning),
  _collector(collector),
  _freelistLock(NULL),
  _yield(should_yield),
  _concurrent_precleaning(concurrent_precleaning)
{
  // FIXME: Should initialize in base class constructor.
  assert(rd != NULL, "ref_discoverer shouldn't be NULL");
  set_ref_discoverer_internal(rd);
}

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    assert(oopDesc::is_oop(obj), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object is now grey)
      _bit_map->mark(addr);
      // push on marking stack (stack should be empty), and drain the
      // stack by applying this closure to the oops in the oops popped
      // from the stack (i.e. blacken the grey objects)
      bool res = _mark_stack->push(obj);
      assert(res, "Should have space to push on empty stack");
      do {
        oop new_oop = _mark_stack->pop();
        assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
        assert(_bit_map->isMarked((HeapWord*)new_oop),
               "only grey objects on this stack");
        // iterate over the oops in this oop, marking and pushing
        // the ones in CMS heap (i.e. in _span).
        new_oop->oop_iterate(&_pushAndMarkClosure);
        // check if it's time to yield
        do_yield_check();
      } while (!_mark_stack->isEmpty() ||
               (!_concurrent_precleaning && take_from_overflow_list()));
      // if the marking stack is empty, and we are not doing this
      // during precleaning, then check the overflow list
    }
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list was drained above");

    assert(_collector->no_preserved_marks(),
           "All preserved marks should have been restored above");
  }
}
|
void MarkRefsIntoAndScanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bit_map->lock());
  // relinquish the free_list_lock and bitMapLock()
  _bit_map->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  _collector->incrementYields();

  // See the comment in coordinator_yield()
  for (unsigned i = 0;
       i < CMSYieldSleepCount &&
       ConcurrentMarkSweepThread::should_yield() &&
       !CMSCollector::foregroundGCIsActive();
       ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

///////////////////////////////////////////////////////////
// ParMarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
  CMSBitMap* bit_map, OopTaskQueue* work_queue):
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _low_water_mark(MIN2((work_queue->max_elems()/4),
                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
  _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
{
  // FIXME: Should initialize in base class constructor.
  assert(rd != NULL, "ref_discoverer shouldn't be NULL");
  set_ref_discoverer_internal(rd);
}

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase, whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    // Ignore the mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(oopDesc::is_oop(obj, true), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object will become grey):
      // It is possible for several threads to be
      // trying to "claim" this object concurrently;
      // the unique thread that succeeds in marking the
      // object first will do the subsequent push on
      // to the work queue (or overflow list).
      if (_bit_map->par_mark(addr)) {
        // push on work_queue (which may not be empty), and trim the
        // queue to an appropriate length by applying this closure to
        // the oops in the oops popped from the stack (i.e. blacken the
        // grey objects)
        bool res = _work_queue->push(obj);
        assert(res, "Low water mark should be less than capacity?");
        trim_queue(_low_water_mark);
      } // Else, another thread claimed the object
    }
  }
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {

  size_t size = 0;
  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(_span.contains(addr), "we are scanning the CMS generation");
  // check if it's time to yield
  if (do_yield_check()) {
    // We yielded for some foreground stop-world work,
    // and we have been asked to abort this ongoing preclean cycle.
    return 0;
  }
  if (_bitMap->isMarked(addr)) {
    // it's marked; is it potentially uninitialized?
    if (p->klass_or_null_acquire() != NULL) {
      // an initialized object; ignore the mark word in the verification below
      // since we are running concurrently with mutators
      assert(oopDesc::is_oop(p, true), "should be an oop");
      if (p->is_objArray()) {
        // objArrays are precisely marked; restrict scanning
        // to dirty cards only.
        size = CompactibleFreeListSpace::adjustObjectSize(
                 p->oop_iterate_size(_scanningClosure, mr));
      } else {
        // A non-array may have been imprecisely marked; we need
        // to scan the object in its entirety.
        size = CompactibleFreeListSpace::adjustObjectSize(
                 p->oop_iterate_size(_scanningClosure));
      }
#ifdef ASSERT
      size_t direct_size =
        CompactibleFreeListSpace::adjustObjectSize(p->size());
      assert(size == direct_size, "Inconsistency in size");
      assert(size >= 3, "Necessary for Printezis marks to work");
      HeapWord* start_pbit = addr + 1;
      HeapWord* end_pbit = addr + size - 1;
      assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
             "inconsistent Printezis mark");
      // Verify that the inner mark bits (between the Printezis bits) are clear,
      // but don't repeat if there are multiple dirty regions for
      // the same object, to avoid potential O(N^2) performance.
      if (addr != _last_scanned_object) {
        _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
        _last_scanned_object = addr;
      }
#endif // ASSERT
    } else {
      // An uninitialized object.
      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
      size = pointer_delta(nextOneAddr + 1, addr);
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
             "alignment problem");
      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
      // will dirty the card when the klass pointer is installed in the
      // object (signaling the completion of initialization).
    }
  } else {
    // Either a not yet marked object or an uninitialized object.
    if (p->klass_or_null_acquire() == NULL) {
      // An uninitialized object; skip to the next card, since
      // we may not be able to read its P-bits yet.
      assert(size == 0, "Initial value");
    } else {
      // An object not (yet) reached by marking: we merely need to
      // compute its size so as to go look at the next block.
      assert(oopDesc::is_oop(p, true), "should be an oop");
      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
    }
  }
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  return size;
}
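
// Note on the return value above: a size of 0 tells the caller to cut the
// scan of this region short -- either because we yielded to a foreground
// stop-world collection and this preclean pass is being aborted, or because
// the object is still uninitialized (its klass pointer not yet published)
// and the still-dirty card will be revisited once its P-bits are readable.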
|
void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bitMap->lock());
  // relinquish the free_list_lock and bitMapLock()
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  _collector->incrementYields();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}


//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {

  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(!_span.contains(addr), "we are scanning the survivor spaces");
  assert(p->klass_or_null() != NULL, "object should be initialized");
  // an initialized object; ignore the mark word in the verification below
  // since we are running concurrently with mutators
  assert(oopDesc::is_oop(p, true), "should be an oop");
  // Note that we do not yield while we iterate over
  // the interior oops of p, pushing the relevant ones
  // on our marking stack.
  size_t size = p->oop_iterate_size(_scanning_closure);
  do_yield_check();
  // Observe that below, we do not abandon the preclean
  // phase as soon as we should; rather we empty the
  // marking stack before returning. This is to satisfy
  // some existing assertions. In general, it may be a
  // good idea to abort immediately and complete the marking
  // from the grey objects at a later time.
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
    assert(_bit_map->isMarked((HeapWord*)new_oop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS heap (i.e. in _span).
    new_oop->oop_iterate(_scanning_closure);
    // check if it's time to yield
    do_yield_check();
  }
  unsigned int after_count =
    CMSHeap::heap()->total_collections();
  bool abort = (_before_count != after_count) ||
               _collector->should_abort_preclean();
  return abort ? 0 : size;
}

void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  _collector->incrementYields();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore the mark word because we are running concurrently with mutators.
  assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
#ifdef ASSERT
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // ASSERT
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
#ifdef ASSERT
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // ASSERT
  return is_obj_array;
}

MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                                           MemRegion span,
                                           CMSBitMap* bitMap, CMSMarkStack* markStack,
                                           bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  DEBUG_ONLY(_verifying = verifying;)
}

void MarkFromRootsClosure::reset(HeapWord* addr) {
  assert(_markStack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
  _threshold = align_up(_finger, CardTable::card_size);
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skipBits > 0) {
    _skipBits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null_acquire() == NULL) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass_or_null_acquire() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return true;
    }
  }
  scanOopsInOop(addr);
  return true;
}
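
// Note on _skipBits above: a Printezis-marked (allocated but uninitialized)
// object contributes two extra set bits beyond the one at addr -- at
// addr + 1 and at its last word (see block_size_using_printezis_bits()) --
// so do_bit() sets _skipBits = 2 to swallow them rather than treating
// either one as the start of a new object.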
|
6351 |
|
6352 // We take a break if we've been at this for a while, |
|
6353 // so as to avoid monopolizing the locks involved. |
|
6354 void MarkFromRootsClosure::do_yield_work() { |
|
6355 // First give up the locks, then yield, then re-lock |
|
6356 // We should probably use a constructor/destructor idiom to |
|
6357 // do this unlock/lock or modify the MutexUnlocker class to |
|
6358 // serve our purpose. XXX |
|
6359 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), |
|
6360 "CMS thread should hold CMS token"); |
|
6361 assert_lock_strong(_bitMap->lock()); |
|
6362 _bitMap->lock()->unlock(); |
|
6363 ConcurrentMarkSweepThread::desynchronize(true); |
|
6364 _collector->stopTimer(); |
|
6365 _collector->incrementYields(); |
|
6366 |
|
6367 // See the comment in coordinator_yield() |
|
6368 for (unsigned i = 0; i < CMSYieldSleepCount && |
|
6369 ConcurrentMarkSweepThread::should_yield() && |
|
6370 !CMSCollector::foregroundGCIsActive(); ++i) { |
|
6371 os::naked_short_sleep(1); |
|
6372 } |
|
6373 |
|
6374 ConcurrentMarkSweepThread::synchronize(true); |
|
6375 _bitMap->lock()->lock_without_safepoint_check(); |
|
6376 _collector->startTimer(); |
|
6377 } |
|

void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(oopDesc::is_oop(obj, true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase. During this time it's possible that a
  // lot of mutations have accumulated in the card table and
  // the mod union table -- these mutation records are redundant
  // until we have actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(is_aligned(old_threshold, CardTable::card_size),
             "_threshold should always be card-aligned");
      _threshold = align_up(_finger, CardTable::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})
  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _finger, this);
  bool res = _markStack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}
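
// A standalone sketch of the card-alignment arithmetic used in
// scanOopsInOop() above. The 512-byte card is an assumption here
// (the usual CardTable::card_size); toy addresses, compiled out.
#if 0
#include <cassert>
#include <cstdint>
static uintptr_t align_up_to_card(uintptr_t addr, uintptr_t card = 512) {
  return (addr + card - 1) & ~(card - 1);   // round up to card boundary
}
int main() {
  uintptr_t threshold = 0x1000;  // card-aligned, by invariant
  uintptr_t finger    = 0x1234;  // end of scanned object, not aligned
  uintptr_t new_threshold = align_up_to_card(finger);
  assert(new_threshold == 0x1400);
  // The MUT cards covering [0x1000, 0x1400) are the ones cleared.
  return 0;
}
#endif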
|

ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
                                                 CMSCollector* collector,
                                                 MemRegion span,
                                                 CMSBitMap* bit_map,
                                                 OopTaskQueue* work_queue,
                                                 CMSMarkStack* overflow_stack):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;  // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool ParMarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null_acquire() == NULL) {
      // in the case of the Clean-on-Enter optimization, redirty the
      // card and avoid clearing the card by increasing the threshold.
      return true;
    }
  }
  scan_oops_in_oop(addr);
  return true;
}
|

void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(oopDesc::is_oop(obj, true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase. During this time it's possible that a
  // lot of mutations have accumulated in the card table and
  // the mod union table -- these mutation records are redundant
  // until we have actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  // The clean-on-enter optimization is disabled by default,
  // until we fix 6178663.
  if (CMSCleanOnEnter && (_finger > _threshold)) {
    // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
    // The set of cards to be cleared is all those that overlap
    // with the interval [_threshold, _finger); note that
    // _threshold is always kept card-aligned but _finger isn't
    // always card-aligned.
    HeapWord* old_threshold = _threshold;
    assert(is_aligned(old_threshold, CardTable::card_size),
           "_threshold should always be card-aligned");
    _threshold = align_up(_finger, CardTable::card_size);
    MemRegion mr(old_threshold, _threshold);
    assert(!mr.is_empty(), "Control point invariant");
    assert(_span.contains(mr), "Should clear within span");  // _whole_span ??
    _mut->clear_range(mr);
  }

  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
  HeapWord* volatile* gfa = _task->global_finger_addr();
  ParPushOrMarkClosure pushOrMarkClosure(_collector,
                                         _span, _bit_map,
                                         _work_queue,
                                         _overflow_stack,
                                         _finger,
                                         gfa, this);
  bool res = _work_queue->push(obj);  // overflow could occur here
  assert(res, "Will hold once we use workqueues");
  while (true) {
    oop new_oop;
    if (!_work_queue->pop_local(new_oop)) {
      // We emptied our work_queue; check if there's stuff that can
      // be gotten from the overflow stack.
      if (CMSConcMarkingTask::get_work_from_overflow_stack(
            _overflow_stack, _work_queue)) {
        do_yield_check();
        continue;
      } else {  // done
        break;
      }
    }
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}
|

// Yield in response to a request from VM Thread or
// from mutators.
void ParMarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
                                                       MemRegion span,
                                                       CMSBitMap* verification_bm,
                                                       CMSBitMap* cms_bm,
                                                       CMSMarkStack* mark_stack):
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack),
  _pam_verify_closure(collector, span, verification_bm, cms_bm,
                      mark_stack)
{
  assert(_mark_stack->isEmpty(), "stack should be empty");
  _finger = _verification_bm->startWord();
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  // convert offset into a HeapWord*
  HeapWord* addr = _verification_bm->startWord() + offset;
  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
         "address out of range");
  assert(_verification_bm->isMarked(addr), "tautology");
  assert(_cms_bm->isMarked(addr), "tautology");

  assert(_mark_stack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert addr to an oop preparatory to scanning
  oop obj = oop(addr);
  assert(oopDesc::is_oop(obj), "should be an oop");
  assert(_finger <= addr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = addr + obj->size();
  assert(_finger > addr, "we just incremented it above");
  // Note: the finger doesn't advance while we drain
  // the stack below.
  bool res = _mark_stack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&_pam_verify_closure);
  }
  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  return true;
}
|

PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
                            CMSCollector* collector, MemRegion span,
                            CMSBitMap* verification_bm, CMSBitMap* cms_bm,
                            CMSMarkStack* mark_stack):
  MetadataVisitingOopIterateClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack)
{ }

template <class T> void PushAndMarkVerifyClosure::do_oop_work(T* p) {
  oop obj = RawAccess<>::oop_load(p);
  do_oop(obj);
}

void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset();   // discard stack contents
  _mark_stack->expand();  // expand the stack if possible
}

void PushAndMarkVerifyClosure::do_oop(oop obj) {
  assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);  // now grey
    if (!_cms_bm->isMarked(addr)) {
      Log(gc, verify) log;
      ResourceMark rm;
      LogStream ls(log.error());
      oop(addr)->print_on(&ls);
      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
      fatal("... aborting");
    }

    if (!_mark_stack->push(obj)) {  // stack overflow
      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
  }
}
|

PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                                     MemRegion span,
                                     CMSBitMap* bitMap, CMSMarkStack* markStack,
                                     HeapWord* finger, MarkFromRootsClosure* parent) :
  MetadataVisitingOopIterateClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }

ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
                                           MemRegion span,
                                           CMSBitMap* bit_map,
                                           OopTaskQueue* work_queue,
                                           CMSMarkStack* overflow_stack,
                                           HeapWord* finger,
                                           HeapWord* volatile* global_finger_addr,
                                           ParMarkFromRootsClosure* parent) :
  MetadataVisitingOopIterateClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _finger(finger),
  _global_finger_addr(global_finger_addr),
  _parent(parent)
{ }

// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
void CMSCollector::lower_restart_addr(HeapWord* low) {
  assert(_span.contains(low), "Out of bounds addr");
  if (_restart_addr == NULL) {
    _restart_addr = low;
  } else {
    _restart_addr = MIN2(_restart_addr, low);
  }
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _markStack->reset();   // discard stack contents
  _markStack->expand();  // expand the stack if possible
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLocker ml(_overflow_stack->par_lock(),
                 Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();   // discard stack contents
  _overflow_stack->expand();  // expand the stack if possible
}
|

void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);  // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) {  // stack overflow
        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}

void ParPushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black.
    // We read the global_finger (volatile read) strictly after marking oop
    bool res = _bit_map->par_mark(addr);  // now grey
    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
    // Should we push this marked oop on our stack?
    // -- if someone else marked it, nothing to do
    // -- if target oop is above global finger nothing to do
    // -- if target oop is in chunk and above local finger
    //    then nothing to do
    // -- else push on work queue
    if (   !res                    // someone else marked it, they will deal with it
        || (addr >= *gfa)          // will be scanned in a later task
        || (_span.contains(addr) && addr >= _finger)) {  // later in this chunk
      return;
    }
    // the bit map iteration has already either passed, or
    // sampled, this bit in the bit map; we'll need to
    // use the marking stack to scan this oop's oops.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow ||
        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
      // stack overflow
      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
      // We cannot assert that the overflow stack is full because
      // it may have been emptied since.
      assert(simulate_overflow ||
             _work_queue->size() == _work_queue->max_elems(),
             "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    do_yield_check();
  }
}
|

PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceDiscoverer* rd,
                                       CMSBitMap* bit_map,
                                       CMSBitMap* mod_union_table,
                                       CMSMarkStack* mark_stack,
                                       bool concurrent_precleaning):
  MetadataVisitingOopIterateClosure(rd),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mod_union_table(mod_union_table),
  _mark_stack(mark_stack),
  _concurrent_precleaning(concurrent_precleaning)
{
  assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
}

// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below).
void PushAndMarkClosure::do_oop(oop obj) {
  // Ignore mark word verification. If during concurrent precleaning,
  // the object monitor may be locked. If during the checkpoint
  // phases, the object may already have been reached by a different
  // path and may be at the end of the global overflow list (so
  // the mark word may be NULL).
  assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    _bit_map->mark(addr);  // ... now grey
    // push on the marking stack (grey set)
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // During precleaning we can just dirty the appropriate card(s)
        // in the mod union table, thus ensuring that the object remains
        // in the grey set, and continue. In the case of object arrays
        // we need to dirty all of the cards that the object spans,
        // since the rescan of object arrays will be limited to the
        // dirty cards.
        // Note that no one can be interfering with us in this action
        // of dirtying the mod union table, so no locking or atomics
        // are required.
        if (obj->is_objArray()) {
          size_t sz = obj->size();
          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          _mod_union_table->mark_range(redirty_range);
        } else {
          _mod_union_table->mark(addr);
        }
        _collector->_ser_pmc_preclean_ovflw++;
      } else {
        // During the remark phase, we need to remember this oop
        // in the overflow list.
        _collector->push_on_overflow_list(obj);
        _collector->_ser_pmc_remark_ovflw++;
      }
    }
  }
}
|

ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
                                             MemRegion span,
                                             ReferenceDiscoverer* rd,
                                             CMSBitMap* bit_map,
                                             OopTaskQueue* work_queue):
  MetadataVisitingOopIterateClosure(rd),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue)
{
  assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
}

// Grey object rescan during second checkpoint phase --
// the parallel version.
void ParPushAndMarkClosure::do_oop(oop obj) {
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
  // its mark word will be NULL). Because this object may
  // have been subsequently popped off the global overflow
  // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
  // the debugger, is_oop_or_null(false) may subsequently start
  // to hold.
  assert(oopDesc::is_oop_or_null(obj, true),
         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {  // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(obj)) {
        _collector->par_push_on_overflow_list(obj);
        _collector->_par_pmc_remark_ovflw++;  // imprecise OK: no need to CAS
      }
    }  // Else, some other thread got there first
  }
}
|

void CMSPrecleanRefsYieldClosure::do_yield_work() {
  Mutex* bml = _collector->bitMapLock();
  assert_lock_strong(bml);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");

  bml->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);

  _collector->stopTimer();
  _collector->incrementYields();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  bml->lock_without_safepoint_check();

  _collector->startTimer();
}

bool CMSPrecleanRefsYieldClosure::should_return() {
  if (ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
  return _collector->foregroundGCIsActive();
}

void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
  assert(((size_t)mr.start()) % CardTable::card_size_in_words == 0,
         "mr should be aligned to start at a card boundary");
  // We'd like to assert:
  //   assert(mr.word_size() % CardTable::card_size_in_words == 0,
  //          "mr should be a range of cards");
  // However, that would be too strong in one case -- the last
  // partition ends at _unallocated_block which, in general, can be
  // an arbitrary boundary, not necessarily card aligned.
  _num_dirty_cards += mr.word_size() / CardTable::card_size_in_words;
  _space->object_iterate_mem(mr, &_scan_cl);
}
|

SweepClosure::SweepClosure(CMSCollector* collector,
                           ConcurrentMarkSweepGeneration* g,
                           CMSBitMap* bitMap, bool should_yield) :
  _collector(collector),
  _g(g),
  _sp(g->cmsSpace()),
  _limit(_sp->sweep_limit()),
  _freelistLock(_sp->freelistLock()),
  _bitMap(bitMap),
  _inFreeRange(false),           // No free range at beginning of sweep
  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
  _lastFreeRangeCoalesced(false),
  _yield(should_yield),
  _freeFinger(g->used_region().start())
{
  NOT_PRODUCT(
    _numObjectsFreed = 0;
    _numWordsFreed   = 0;
    _numObjectsLive = 0;
    _numWordsLive   = 0;
    _numObjectsAlreadyFree = 0;
    _numWordsAlreadyFree   = 0;
    _last_fc = NULL;

    _sp->initializeIndexedFreeListArrayReturnedBytes();
    _sp->dictionary()->initialize_dict_returned_bytes();
  )
  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
         "sweep _limit out of bounds");
  log_develop_trace(gc, sweep)("====================");
  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
}

void SweepClosure::print_on(outputStream* st) const {
  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
               p2i(_sp->bottom()), p2i(_sp->end()));
  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
}
|

#ifndef PRODUCT
// Assertion checking only: no useful work in product mode --
// however, if any of the flags below become product flags,
// you may need to review this code to see if it needs to be
// enabled in product mode.
SweepClosure::~SweepClosure() {
  assert_lock_strong(_freelistLock);
  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
         "sweep _limit out of bounds");
  if (inFreeRange()) {
    Log(gc, sweep) log;
    log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
    ResourceMark rm;
    LogStream ls(log.error());
    print_on(&ls);
    ShouldNotReachHere();
  }

  if (log_is_enabled(Debug, gc, sweep)) {
    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
  }

  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes  Indexed List Returned " SIZE_FORMAT " bytes  Dictionary Returned " SIZE_FORMAT " bytes",
                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
  }
  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
  log_develop_trace(gc, sweep)("================");
}
#endif  // PRODUCT
|

void SweepClosure::initialize_free_range(HeapWord* freeFinger,
                                         bool freeRangeInFreeLists) {
  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
                               p2i(freeFinger), freeRangeInFreeLists);
  assert(!inFreeRange(), "Trampling existing free range");
  set_inFreeRange(true);
  set_lastFreeRangeCoalesced(false);

  set_freeFinger(freeFinger);
  set_freeRangeInFreeLists(freeRangeInFreeLists);
  if (CMSTestInFreeList) {
    if (freeRangeInFreeLists) {
      FreeChunk* fc = (FreeChunk*) freeFinger;
      assert(fc->is_free(), "A chunk on the free list should be free.");
      assert(fc->size() > 0, "Free range should have a size");
      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
    }
  }
}
|

// Note that the sweeper runs concurrently with mutators. Thus,
// it is possible for direct allocation in this generation to happen
// in the middle of the sweep. Note that the sweeper also coalesces
// contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
// This is accomplished by the sweeper locking the free lists while
// it is sweeping. Thus blocks that are determined to be free are
// indeed free. There is however one additional complication:
// blocks that have been allocated since the final checkpoint and
// mark, will not have been marked and so would be treated as
// unreachable and swept up. To prevent this, the allocator marks
// the bit map when allocating during the sweep phase. This leads,
// however, to a further complication -- objects may have been allocated
// but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
// in order to skip over it. To deal with this case, we use a technique
// (due to Printezis) to encode such uninitialized block sizes in the
// bit map. Since the bit map uses a bit per every HeapWord, but the
// CMS generation has a minimum object size of 3 HeapWords, it follows
// that "normal marks" won't be adjacent in the bit map (there will
// always be at least two 0 bits between successive 1 bits). We make use
// of these "unused" bits to represent uninitialized blocks -- the bit
// corresponding to the start of the uninitialized object and the next
// bit are both set. Finally, a 1 bit marks the end of the object that
// started with the two consecutive 1 bits to indicate its potentially
// uninitialized state.
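
// A compact, standalone illustration of the Printezis encoding
// described above, on a toy 16-word "heap" bitmap; hypothetical
// values, compiled out.
#if 0
#include <bitset>
#include <cassert>
int main() {
  // An uninitialized block occupying words [4, 10) is encoded by
  // setting bits 4 and 5 (the "Printezis marks") and bit 9, the
  // last word of the block.
  std::bitset<16> bm;
  bm.set(4); bm.set(5); bm.set(9);
  size_t addr = 4;
  assert(bm[addr] && bm[addr + 1]);  // two adjacent 1s => uninitialized
  // Recover the size: find the next 1 bit at or after addr + 2 and
  // measure inclusively, as do_live_chunk() does below.
  size_t next = addr + 2;
  while (!bm[next]) next++;          // lands on bit 9
  size_t size_in_words = next + 1 - addr;
  assert(size_in_words == 6);
  return 0;
}
#endif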
|

size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  FreeChunk* fc = (FreeChunk*)addr;
  size_t res;

  // Check if we are done sweeping. Below we check "addr >= _limit" rather
  // than "addr == _limit" because although _limit was a block boundary when
  // we started the sweep, it may no longer be one because heap expansion
  // may have caused us to coalesce the block ending at the address _limit
  // with a newly expanded chunk (this happens when _limit was set to the
  // previous _end of the space), so we may have stepped past _limit:
  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
  if (addr >= _limit) {  // we have swept up to or past the limit: finish up
    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
           "sweep _limit out of bounds");
    assert(addr < _sp->end(), "addr out of bounds");
    // Flush any free range we might be holding as a single
    // coalesced chunk to the appropriate free list.
    if (inFreeRange()) {
      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
             "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
      flush_cur_free_chunk(freeFinger(),
                           pointer_delta(addr, freeFinger()));
      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
                                   lastFreeRangeCoalesced() ? 1 : 0);
    }

    // help the iterator loop finish
    return pointer_delta(_sp->end(), addr);
  }

  assert(addr < _limit, "sweep invariant");
  // check if we should yield
  do_yield_check(addr);
  if (fc->is_free()) {
    // Chunk that is already free
    res = fc->size();
    do_already_free_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    // If we flush the chunk at hand in lookahead_and_flush()
    // and it's coalesced with a preceding chunk, then the
    // process of "mangling" the payload of the coalesced block
    // will cause erasure of the size information from the
    // (erstwhile) header of all the coalesced blocks but the
    // first, so the first disjunct in the assert will not hold
    // in that specific case (in which case the second disjunct
    // will hold).
    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
           "Otherwise the size info doesn't change at this step");
    NOT_PRODUCT(
      _numObjectsAlreadyFree++;
      _numWordsAlreadyFree += res;
    )
    NOT_PRODUCT(_last_fc = fc;)
  } else if (!_bitMap->isMarked(addr)) {
    // Chunk is fresh garbage
    res = do_garbage_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
      _numObjectsFreed++;
      _numWordsFreed += res;
    )
  } else {
    // Chunk that is alive.
    res = do_live_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
      _numObjectsLive++;
      _numWordsLive += res;
    )
  }
  return res;
}
|

// For the smart allocation, record the following
// split deaths - a free chunk is removed from its free list because
//      it is being split into two or more chunks.
// split birth - a free chunk is being added to its free list because
//      a larger free chunk has been split and resulted in this free chunk.
// coal death - a free chunk is being removed from its free list because
//      it is being coalesced into a large free chunk.
// coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
//      this free chunk.
//
// These statistics are used to determine the desired number of free
// chunks of a given size. The desired number is chosen to be relative
// to the end of a CMS sweep. The desired number at the end of a sweep
// is the
//      count-at-end-of-previous-sweep (an amount that was enough)
//    - count-at-beginning-of-current-sweep (the excess)
//    + split-births (gains in this size during interval)
//    - split-deaths (demands on this size during interval)
// where the interval is from the end of one sweep to the end of the
// next.
//
// When sweeping the sweeper maintains an accumulated chunk which is
// the chunk that is made up of chunks that have been coalesced. That
// will be termed the left-hand chunk. A new chunk of garbage that
// is being considered for coalescing will be referred to as the
// right-hand chunk.
//
// When making a decision on whether to coalesce a right-hand chunk with
// the current left-hand chunk, the current count vs. the desired count
// of the left-hand chunk is considered. Also if the right-hand chunk
// is near the large chunk at the end of the heap (see
// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
// left-hand chunk is coalesced.
//
// When making a decision about whether to split a chunk, the desired count
// vs. the current count of the candidate to be split is also considered.
// If the candidate is underpopulated (currently fewer chunks than desired)
// a chunk of an overpopulated (currently more chunks than desired) size may
// be chosen. The "hint" associated with a free list, if non-null, points
// to a free list which may be overpopulated.
//
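
// The desired-count rule above reduces to simple arithmetic; a sketch
// with made-up numbers, compiled out.
#if 0
#include <cassert>
int main() {
  long prev_end  = 100;  // count at end of previous sweep (was enough)
  long cur_begin =  40;  // count at beginning of this sweep (the excess)
  long births    =  25;  // split births during the interval
  long deaths    =  10;  // split deaths during the interval
  long desired = prev_end - cur_begin + births - deaths;
  assert(desired == 75); // target population for this chunk size
  return 0;
}
#endif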
|

void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
  const size_t size = fc->size();
  // Chunks that cannot be coalesced are not in the
  // free lists.
  if (CMSTestInFreeList && !fc->cantCoalesce()) {
    assert(_sp->verify_chunk_in_free_list(fc),
           "free chunk should be in free lists");
  }
  // a chunk that is already free, should not have been
  // marked in the bit map
  HeapWord* const addr = (HeapWord*) fc;
  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  // Verify that the bit map has no bits marked between
  // addr and purported end of this block.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

  // Some chunks cannot be coalesced under any circumstances.
  // See the definition of cantCoalesce().
  if (!fc->cantCoalesce()) {
    // This chunk can potentially be coalesced.
    // All the work is done in
    do_post_free_or_garbage_chunk(fc, size);
    // Note that if the chunk is not coalescable (the else arm
    // below), we unconditionally flush, without needing to do
    // a "lookahead," as we do below.
    if (inFreeRange()) lookahead_and_flush(fc, size);
  } else {
    // Code path common to both original and adaptive free lists.

    // can't coalesce with previous block; this should be treated
    // as the end of a free run if any
    if (inFreeRange()) {
      // we kicked some butt; time to pick up the garbage
      assert(freeFinger() < addr, "freeFinger points too high");
      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
    }
    // else, nothing to do, just continue
  }
}
|

size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
  // This is a chunk of garbage. It is not in any free list.
  // Add it to a free list or let it possibly be coalesced into
  // a larger chunk.
  HeapWord* const addr = (HeapWord*) fc;
  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());

  // Verify that the bit map has no bits marked between
  // addr and purported end of just dead object.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  do_post_free_or_garbage_chunk(fc, size);

  assert(_limit >= addr + size,
         "A freshly garbage chunk can't possibly straddle over _limit");
  if (inFreeRange()) lookahead_and_flush(fc, size);
  return size;
}

size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
  HeapWord* addr = (HeapWord*) fc;
  // The sweeper has just found a live object. Return any accumulated
  // left hand chunk to the free lists.
  if (inFreeRange()) {
    assert(freeFinger() < addr, "freeFinger points too high");
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // This object is live: we'd normally expect this to be
  // an oop, and like to assert the following:
  //   assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
  // However, as we commented above, this may be an object whose
  // header hasn't yet been initialized.
  size_t size;
  assert(_bitMap->isMarked(addr), "Tautology for this control point");
  if (_bitMap->isMarked(addr + 1)) {
    // Determine the size from the bit map, rather than trying to
    // compute it from the object header.
    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
    size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");

#ifdef ASSERT
    if (oop(addr)->klass_or_null_acquire() != NULL) {
      // Ignore mark word because we are running concurrent with mutators
      assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
      assert(size ==
             CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
             "P-mark and computed size do not agree");
    }
#endif

  } else {
    // This should be an initialized object that's alive.
    assert(oop(addr)->klass_or_null_acquire() != NULL,
           "Should be an initialized object");
    // Ignore mark word because we are running concurrent with mutators
    assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
    // Verify that the bit map has no bits marked between
    // addr and purported end of this block.
    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
    assert(size >= 3, "Necessary for Printezis marks to work");
    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  }
  return size;
}
|

void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                 size_t chunkSize) {
  // do_post_free_or_garbage_chunk() should only be called in the case
  // of the adaptive free list allocator.
  const bool fcInFreeLists = fc->is_free();
  assert((HeapWord*)fc <= _limit, "sweep invariant");
  if (CMSTestInFreeList && fcInFreeLists) {
    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
  }

  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);

  HeapWord* const fc_addr = (HeapWord*) fc;

  bool coalesce = false;
  const size_t left  = pointer_delta(fc_addr, freeFinger());
  const size_t right = chunkSize;
  switch (FLSCoalescePolicy) {
    // numeric value forms a coalition aggressiveness metric
    case 0: {  // never coalesce
      coalesce = false;
      break;
    }
    case 1: {  // coalesce if left & right chunks on overpopulated lists
      coalesce = _sp->coalOverPopulated(left) &&
                 _sp->coalOverPopulated(right);
      break;
    }
    case 2: {  // coalesce if left chunk on overpopulated list (default)
      coalesce = _sp->coalOverPopulated(left);
      break;
    }
    case 3: {  // coalesce if left OR right chunk on overpopulated list
      coalesce = _sp->coalOverPopulated(left) ||
                 _sp->coalOverPopulated(right);
      break;
    }
    case 4: {  // always coalesce
      coalesce = true;
      break;
    }
    default:
      ShouldNotReachHere();
  }

  // Should the current free range be coalesced?
  // If the chunk is in a free range and either we decided to coalesce above
  // or the chunk is near the large block at the end of the heap
  // (isNearLargestChunk() returns true), then coalesce this chunk.
  const bool doCoalesce = inFreeRange()
                          && (coalesce || _g->isNearLargestChunk(fc_addr));
  if (doCoalesce) {
    // Coalesce the current free range on the left with the new
    // chunk on the right. If either is on a free list,
    // it must be removed from the list and stashed in the closure.
    if (freeRangeInFreeLists()) {
      FreeChunk* const ffc = (FreeChunk*)freeFinger();
      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
             "Size of free range is inconsistent with chunk size.");
      if (CMSTestInFreeList) {
        assert(_sp->verify_chunk_in_free_list(ffc),
               "Chunk is not in free lists");
      }
      _sp->coalDeath(ffc->size());
      _sp->removeFreeChunkFromFreeLists(ffc);
      set_freeRangeInFreeLists(false);
    }
    if (fcInFreeLists) {
      _sp->coalDeath(chunkSize);
      assert(fc->size() == chunkSize,
             "The chunk has the wrong size or is not in the free lists");
      _sp->removeFreeChunkFromFreeLists(fc);
    }
    set_lastFreeRangeCoalesced(true);
    print_free_block_coalesced(fc);
  } else {  // not in a free range and/or should not coalesce
    // Return the current free range and start a new one.
    if (inFreeRange()) {
      // In a free range but cannot coalesce with the right hand chunk.
      // Put the current free range into the free lists.
      flush_cur_free_chunk(freeFinger(),
                           pointer_delta(fc_addr, freeFinger()));
    }
    // Set up for new free range. Pass along whether the right hand
    // chunk is in the free lists.
    initialize_free_range((HeapWord*)fc, fcInFreeLists);
  }
}
|

// Lookahead flush:
// If we are tracking a free range, and this is the last chunk that
// we'll look at because its end crosses past _limit, we'll preemptively
// flush it along with any free range we may be holding on to. Note that
// this can be the case only for an already free or freshly garbage
// chunk. If this block is an object, it can never straddle
// over _limit. The "straddling" occurs when _limit is set at
// the previous end of the space when this cycle started, and
// a subsequent heap expansion caused the previously co-terminal
// free block to be coalesced with the newly expanded portion,
// thus rendering _limit a non-block-boundary making it dangerous
// for the sweeper to step over and examine.
void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
  assert(_sp->used_region().contains(eob - 1),
         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
  if (eob >= _limit) {
    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
    // Return the storage we are tracking back into the free lists.
    log_develop_trace(gc, sweep)("Flushing ... ");
    assert(freeFinger() < eob, "Error");
    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
  }
}
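
// A toy rendering of the straddle test above: after expansion, a
// coalesced free chunk can end past the stale _limit, triggering the
// preemptive flush. Hypothetical byte addresses, compiled out.
#if 0
#include <cassert>
#include <cstdint>
int main() {
  uintptr_t limit = 0x8000;    // old end of the space when the sweep began
  uintptr_t fc    = 0x7F00;    // start of the coalesced free chunk
  uintptr_t size  = 0x0200;    // its size, grown past the old end
  uintptr_t eob   = fc + size; // end of block = 0x8100
  assert(eob >= limit);        // the condition lookahead_and_flush tests
  return 0;
}
#endif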
|

void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  assert(size > 0,
         "A zero sized chunk cannot be added to the free lists.");
  if (!freeRangeInFreeLists()) {
    if (CMSTestInFreeList) {
      FreeChunk* fc = (FreeChunk*) chunk;
      fc->set_size(size);
      assert(!_sp->verify_chunk_in_free_list(fc),
             "chunk should not be in free lists yet");
    }
    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
    // A new free range is going to be starting. The current
    // free range has not been added to the free lists yet or
    // was removed, so add it back.
    // If the current free range was coalesced, then the death
    // of the free range was recorded. Record a birth now.
    if (lastFreeRangeCoalesced()) {
      _sp->coalBirth(size);
    }
    _sp->addChunkAndRepairOffsetTable(chunk, size,
                                      lastFreeRangeCoalesced());
  } else {
    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
  }
  set_inFreeRange(false);
  set_freeRangeInFreeLists(false);
}
|

// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void SweepClosure::do_yield_work(HeapWord* addr) {
  // Return current free chunk being used for coalescing (if any)
  // to the appropriate freelist. After yielding, the next
  // free block encountered will start a coalescing range of
  // free blocks. If the next free block is adjacent to the
  // chunk just flushed, they will need to wait for the next
  // sweep to be coalesced.
  if (inFreeRange()) {
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // First give up the locks, then yield, then re-lock.
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bitMap->lock());
  assert_lock_strong(_freelistLock);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  _collector->incrementYields();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::naked_short_sleep(1);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
|

#ifndef PRODUCT
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
  return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif

void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
                               p2i(fc), fc->size());
}

// CMSIsAliveClosure
bool CMSIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_span.contains(addr) || _bit_map->isMarked(addr));
}
|

CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                                         bool cpc):
  _collector(collector),
  _span(span),
  _mark_stack(mark_stack),
  _bit_map(bit_map),
  _concurrent_precleaning(cpc) {
  assert(!_span.is_empty(), "Empty span could spell trouble");
}

// CMSKeepAliveClosure: the serial version
void CMSKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    _bit_map->mark(addr);
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // We dirty the overflown object and let the remark
        // phase deal with it.
        assert(_collector->overflow_list_is_empty(), "Error");
        // In the case of object arrays, we need to dirty all of
        // the cards that the object spans. No locking or atomics
        // are needed since no one else can be mutating the mod union
        // table.
        if (obj->is_objArray()) {
          size_t sz = obj->size();
          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          _collector->_modUnionTable.mark_range(redirty_range);
        } else {
          _collector->_modUnionTable.mark(addr);
        }
        _collector->_ser_kac_preclean_ovflw++;
      } else {
        _collector->push_on_overflow_list(obj);
        _collector->_ser_kac_ovflw++;
      }
    }
  }
}
|
7633 |
|
7634 // CMSParKeepAliveClosure: a parallel version of the above. |
|
7635 // The work queues are private to each closure (thread), |
|
7636 // but (may be) available for stealing by other threads. |
|
7637 void CMSParKeepAliveClosure::do_oop(oop obj) { |
|
7638 HeapWord* addr = (HeapWord*)obj; |
|
7639 if (_span.contains(addr) && |
|
7640 !_bit_map->isMarked(addr)) { |
|
7641 // In general, during recursive tracing, several threads |
|
7642 // may be concurrently getting here; the first one to |
|
7643 // "tag" it, claims it. |
|
7644 if (_bit_map->par_mark(addr)) { |
|
7645 bool res = _work_queue->push(obj); |
|
7646 assert(res, "Low water mark should be much less than capacity"); |
|
7647 // Do a recursive trim in the hope that this will keep |
|
7648 // stack usage lower, but leave some oops for potential stealers |
|
7649 trim_queue(_low_water_mark); |
|
7650 } // Else, another thread got there first |
|
7651 } |
|
7652 } |
|
7653 |
|
7654 void CMSParKeepAliveClosure::trim_queue(uint max) { |
|
7655 while (_work_queue->size() > max) { |
|
7656 oop new_oop; |
|
7657 if (_work_queue->pop_local(new_oop)) { |
|
7658 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop"); |
|
7659 assert(_bit_map->isMarked((HeapWord*)new_oop), |
|
7660 "no white objects on this stack!"); |
|
7661 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); |
|
7662 // iterate over the oops in this oop, marking and pushing |
|
7663 // the ones in CMS heap (i.e. in _span). |
|
7664 new_oop->oop_iterate(&_mark_and_push); |
|
7665 } |
|
7666 } |
|
7667 } |
|
7668 |
|
7669 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( |
|
7670 CMSCollector* collector, |
|
7671 MemRegion span, CMSBitMap* bit_map, |
|
7672 OopTaskQueue* work_queue): |
|
7673 _collector(collector), |
|
7674 _span(span), |
|
7675 _work_queue(work_queue), |
|
7676 _bit_map(bit_map) { } |
|
7677 |
|
7678 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { |
|
7679 HeapWord* addr = (HeapWord*)obj; |
|
7680 if (_span.contains(addr) && |
|
7681 !_bit_map->isMarked(addr)) { |
|
7682 if (_bit_map->par_mark(addr)) { |
|
7683 bool simulate_overflow = false; |
|
7684 NOT_PRODUCT( |
|
7685 if (CMSMarkStackOverflowALot && |
|
7686 _collector->par_simulate_overflow()) { |
|
7687 // simulate a stack overflow |
|
7688 simulate_overflow = true; |
|
7689 } |
|
7690 ) |
|
7691 if (simulate_overflow || !_work_queue->push(obj)) { |
|
7692 _collector->par_push_on_overflow_list(obj); |
|
7693 _collector->_par_kac_ovflw++; |
|
7694 } |
|
7695 } // Else another thread got there already |
|
7696 } |
|
7697 } |
|
7698 |
|
7699 ////////////////////////////////////////////////////////////////// |
|
7700 // CMSExpansionCause ///////////////////////////// |
|
7701 ////////////////////////////////////////////////////////////////// |
|
7702 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) { |
|
7703 switch (cause) { |
|
7704 case _no_expansion: |
|
7705 return "No expansion"; |
|
7706 case _satisfy_free_ratio: |
|
7707 return "Free ratio"; |
|
7708 case _satisfy_promotion: |
|
7709 return "Satisfy promotion"; |
|
7710 case _satisfy_allocation: |
|
7711 return "allocation"; |
|
7712 case _allocate_par_lab: |
|
7713 return "Par LAB"; |
|
7714 case _allocate_par_spooling_space: |
|
7715 return "Par Spooling Space"; |
|
7716 case _adaptive_size_policy: |
|
7717 return "Ergonomics"; |
|
7718 default: |
|
7719 return "unknown"; |
|
7720 } |
|
7721 } |
|
7722 |
|
7723 void CMSDrainMarkingStackClosure::do_void() { |
|
7724 // the max number to take from overflow list at a time |
|
7725 const size_t num = _mark_stack->capacity()/4; |
|
7726 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), |
|
7727 "Overflow list should be NULL during concurrent phases"); |
|
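  // Note the loop condition below: once the mark stack drains empty,
  // take_from_overflow_list() refills it from the overflow list and the
  // loop continues; it terminates only when both the stack and the
  // overflow list are exhausted.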
  while (!_mark_stack->isEmpty() ||
         // if stack is empty, check the overflow list
         _collector->take_from_overflow_list(num, _mark_stack)) {
    oop obj = _mark_stack->pop();
    HeapWord* addr = (HeapWord*)obj;
    assert(_span.contains(addr), "Should be within span");
    assert(_bit_map->isMarked(addr), "Should be marked");
    assert(oopDesc::is_oop(obj), "Should be an oop");
    obj->oop_iterate(_keep_alive);
  }
}

void CMSParDrainMarkingStackClosure::do_void() {
  // drain queue
  trim_queue(0);
}

// Trim our work_queue so its length is below max at return
void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(oopDesc::is_oop(new_oop), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}

////////////////////////////////////////////////////////////////////
// Support for Marking Stack Overflow list handling and related code
////////////////////////////////////////////////////////////////////
// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try and share that code
// as much as possible in the future.

#ifndef PRODUCT
// Debugging support for CMSStackOverflowALot

// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
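// With the decrement-and-reset scheme below, roughly one call in every
// CMSMarkStackOverflowInterval reports a simulated overflow; e.g. an
// interval of 1000 forces an overflow about once per thousand checks.
// Unsynchronized concurrent decrements may skew that ratio somewhat,
// which is acceptable for a debugging aid.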
bool CMSCollector::simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = CMSMarkStackOverflowInterval;
    return true;
  } else {
    return false;
  }
}

bool CMSCollector::par_simulate_overflow() {
  return simulate_overflow();
}
#endif

// Single-threaded
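// The overflow list is threaded through the objects' mark words: each
// entry's mark word holds a pointer to the next entry. Popping an entry
// resets its mark word to the prototype value; any mark word that carried
// real state was saved by preserve_mark_if_necessary() at push time and
// is reinstated later by restore_preserved_marks_if_any().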
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  size_t i = num;
  oop cur = _overflow_list;
  const markWord proto = markWord::prototype();
  NOT_PRODUCT(ssize_t n = 0;)
  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
    next = oop(cur->mark_raw().to_pointer());
    cur->set_mark_raw(proto);   // until proven otherwise
    assert(oopDesc::is_oop(cur), "Should be an oop");
    bool res = stack->push(cur);
    assert(res, "Bit off more than can chew?");
    NOT_PRODUCT(n++;)
  }
  _overflow_list = cur;
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
#endif
  return !stack->isEmpty();
}

#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from/eden)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
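//
// At any instant _overflow_list is in one of three states:
//   (1) NULL: the list is empty;
//   (2) BUSY: the list has been claimed whole by a taker, which will
//       write a suffix (or NULL) back shortly;
//   (3) a chain of objects threaded through their mark words.
// A taker xchg's BUSY into the head while it carves off its prefix; a
// concurrent pusher that observes BUSY treats the list as empty and may
// install its own object over the marker. This is why the taker below
// may have to splice its suffix back with a list walk rather than a
// simple store.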
bool CMSCollector::par_take_from_overflow_list(size_t num,
                                               OopTaskQueue* work_q,
                                               int no_of_gc_threads) {
  assert(work_q->size() == 0, "First empty local work queue");
  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  if (_overflow_list == NULL) {
    return false;
  }
  // Grab the entire list; we'll put back a suffix
  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  // set to ParallelGCThreads.
  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, num/100);
  // If the list is busy, we spin for a short while,
  // sleeping between attempts to get the list.
  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
    os::naked_sleep(sleep_time_millis);
    if (_overflow_list == NULL) {
      // Nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // Try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
    }
  }
  // If the list was found to be empty, or we spun long
  // enough, we give up and return empty-handed. If we leave
  // the list in the BUSY state below, it must be the case that
  // some other thread holds the overflow list and will set it
  // to a non-BUSY state in the future.
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = num;
  oop cur = prefix;
  // Walk down the first "num" objects, unless we reach the end.
  for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
  if (cur->mark_raw().to_pointer() == NULL) {
    // We have "num" or fewer elements in the list, so there
    // is nothing to return to the global list.
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
  } else {
    // Chop off the suffix and return it to the global list.
    assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
    oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
    cur->set_mark_raw(markWord::from_pointer(NULL));     // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix without incurring the cost
    // of a walk down the list.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else sneaked in (at least) an element; we'll need
      // to do a splice. Find tail of suffix so we can prepend suffix to global
      // list.
      for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
      oop suffix_tail = cur;
      assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
             "Tautology");
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
        } else { // cur_overflow_list == BUSY
          suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
        }
        // ... and try to place spliced list back on overflow_list ...
        observed_overflow_list =
          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
      // ... until we have succeeded in doing so.
    }
  }

  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markWord proto = markWord::prototype();
  oop next;
  NOT_PRODUCT(ssize_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark_raw().to_pointer());
    cur->set_mark_raw(proto);   // until proven otherwise
    assert(oopDesc::is_oop(cur), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::sub(n, &_num_par_pushes);
#endif
  return true;
}

// Single-threaded
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(oopDesc::is_oop(p), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark_raw(markWord::from_pointer(_overflow_list));
  _overflow_list = p;
}

// Multi-threaded; use CAS to prepend to overflow list
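// If the observed head is BUSY (a taker has claimed the list), p cannot
// be linked to it (BUSY is not a real oop), so p's "next" is set to NULL
// and the CAS tries to install p in place of the marker. The taker's
// later write-back will then see that the head changed and splice its
// suffix behind whatever was pushed in the meantime.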
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
  assert(oopDesc::is_oop(p), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    if (cur_overflow_list != BUSY) {
      p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
    } else {
      p->set_mark_raw(markWord::from_pointer(NULL));
    }
    observed_overflow_list =
      Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}
#undef BUSY

// Single threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// a lot of code in the JVM. The prudent thing for GrowableArray
// to do (for now) is to exit with an error. However, that may
// be too draconian in some cases because the caller may be
// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method which returns
// an indication of success or failure with the assumption that
// the caller may be able to recover from a failure; code in
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
void CMSCollector::preserve_mark_work(oop p, markWord m) {
  _preserved_oop_stack.push(p);
  _preserved_mark_stack.push(m);
  assert(m == p->mark_raw(), "Mark word changed");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");
}

// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
  markWord m = p->mark_raw();
  if (p->mark_must_be_preserved(m)) {
    preserve_mark_work(p, m);
  }
}

void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  markWord m = p->mark_raw();
  if (p->mark_must_be_preserved(m)) {
    MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    // Even though we read the mark word without holding
    // the lock, we are assured that it will not change
    // because we "own" this oop, so no other thread can
    // be trying to push it on the overflow list; see
    // the assertion in preserve_mark_work() that checks
    // that m == p->mark_raw().
    preserve_mark_work(p, m);
  }
}

// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice and, when it happens, its
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");

  while (!_preserved_oop_stack.is_empty()) {
    oop p = _preserved_oop_stack.pop();
    assert(oopDesc::is_oop(p), "Should be an oop");
    assert(_span.contains(p), "oop should be in _span");
    assert(p->mark_raw() == markWord::prototype(),
           "Set when taken from overflow list");
    markWord m = _preserved_mark_stack.pop();
    p->set_mark_raw(m);
  }
  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
         "stacks were cleared above");
}

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
}
#endif

// Transfer some number of overflown objects to the usual marking
// stack. Return true if some objects were transferred.
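// The transfer size computed below is a heuristic: at most a quarter of
// the mark stack's remaining free capacity, further capped by
// ParGCDesiredObjsFromOverflowList, so that a refill leaves ample
// headroom on the stack.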
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);

  bool res = _collector->take_from_overflow_list(num, _mark_stack);
  assert(_collector->overflow_list_is_empty() || res,
         "If list is not empty, we should have taken something");
  assert(!res || !_mark_stack->isEmpty(),
         "If we took something, it should now be on our stack");
  return res;
}

size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  size_t res = _sp->block_size_no_stall(addr, _collector);
  if (_sp->block_is_obj(addr)) {
    if (_live_bit_map->isMarked(addr)) {
      // It can't have been dead in a previous cycle
      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
    } else {
      _dead_bit_map->mark(addr);  // mark the dead object
    }
  }
  // Could be 0, if the block size could not be computed without stalling.
  return res;
}

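// A rough reading of the flag matrices below: InitialMarking records the
// GC begin time and pre-GC usage, FinalMarking merely accumulates GC
// time, and Sweeping records peak and post-GC usage, the end time, and
// the collection count; together the three stop-the-world phases bracket
// one CMS cycle in the memory manager's statistics.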
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
  GCMemoryManager* manager = CMSHeap::heap()->old_manager();
  switch (phase) {
    case CMSCollector::InitialMarking:
      initialize(manager /* GC manager */,
                 cause   /* cause of the GC */,
                 true    /* allMemoryPoolsAffected */,
                 true    /* recordGCBeginTime */,
                 true    /* recordPreGCUsage */,
                 false   /* recordPeakUsage */,
                 false   /* recordPostGCusage */,
                 true    /* recordAccumulatedGCTime */,
                 false   /* recordGCEndTime */,
                 false   /* countCollection */);
      break;

    case CMSCollector::FinalMarking:
      initialize(manager /* GC manager */,
                 cause   /* cause of the GC */,
                 true    /* allMemoryPoolsAffected */,
                 false   /* recordGCBeginTime */,
                 false   /* recordPreGCUsage */,
                 false   /* recordPeakUsage */,
                 false   /* recordPostGCusage */,
                 true    /* recordAccumulatedGCTime */,
                 false   /* recordGCEndTime */,
                 false   /* countCollection */);
      break;

    case CMSCollector::Sweeping:
      initialize(manager /* GC manager */,
                 cause   /* cause of the GC */,
                 true    /* allMemoryPoolsAffected */,
                 false   /* recordGCBeginTime */,
                 false   /* recordPreGCUsage */,
                 true    /* recordPeakUsage */,
                 true    /* recordPostGCusage */,
                 false   /* recordAccumulatedGCTime */,
                 true    /* recordGCEndTime */,
                 true    /* countCollection */);
      break;

    default:
      ShouldNotReachHere();
  }
}
|