/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it was holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
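
// Illustrative usage sketch (not part of the original source): the guard is
// meant to bracket a phase during which the other thread must not interfere.
// On the CMS thread one would write, for example:
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);  // acquire the CMS token
//     ... do one phase of collection work ...
//   }                                             // token released here
//
// The VM thread passes false instead; the asserts in the constructor and
// destructor check that the caller really is the thread it claims to be
// and that it still holds the token when the scope exits.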

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms. Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};
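
// Illustrative usage sketch (not part of the original source): a foreground
// collection would suppress icms for its duration with a scoped guard, e.g.
//
//   {
//     ICMSDisabler disabler;  // icms off; CMS thread woken to notice
//     ... run the foreground collection ...
//   }                         // dtor re-enables icms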

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}
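
// Worked example (illustrative, with assumed values, not in the original
// source): if MinChunkSize were 4 HeapWords and oopDesc::header_size()
// were 2 HeapWords, then _dilatation_factor = 4 / 2 = 2.0, i.e. in the
// worst case an object can double in size when promoted into this
// generation. This is the multiplier used by promotion_attempt_is_safe()
// further below.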

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
      _span,                               // span
      _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
      _cmsGen->refs_discovery_is_mt(),     // mt_discovery
      &_is_alive_closure,
      ParallelGCThreads,
      ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
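
// Worked example (illustrative, with assumed values, not in the original
// source): with cms_free = 100M, expected_promotion = 20M and
// CMSIncrementalSafetyFactor = 10, the adjusted headroom is
// (100M - 20M) * (100 - 10) / 100 = 72M; at a cms_consumption_rate() of
// 7M per second the estimate returned is roughly 72M / (7M + 1), i.e.
// a bit over 10 seconds until the generation is full.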

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note: use subtraction with caution since it may underflow (values are
  // unsigned). Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
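
// Worked example (illustrative, not in the original source): going down
// from old_duty_cycle = 60 to new_duty_cycle = 10, largest_delta =
// MAX2(60/4, 5U) = 15; since 10 + 15 < 60 the result is clipped to
// 60 - 15 = 45. Going up from 20 to 90, largest_delta = MAX2(20/4, 15U)
// = 15; since 90 > 20 + 15 the result is clipped to MIN2(20 + 15, 100U)
// = 35. Either way the duty cycle moves toward the target in bounded steps.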

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}
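
// Worked example (illustrative, with assumed values, not in the original
// source): if cms_duration_per_mb() is 0.02s and 500M of the generation is
// in use, the scaled duration is 0.02 * 500 = 10s of CMS work; with 2s of
// that already done and 16s until the generation is full, duty_cycle_dbl =
// 100 * (10 - 2) / 16 = 50, i.e. CMS should run for about half of the
// remaining mutator allocation window.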

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  _is_alive_closure(&_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _unload_classes(false),
  _unloaded_classes_last_cycle(false),
  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size() > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Adjust my span to cover old (cms) gen and perm gen
  _span = _cmsGen->reserved()._union(_permGen->reserved());
  // Initialize the span of is_alive_closure
  _is_alive_closure.set_span(_span);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(CMSMarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
    }
    if (ParallelCMSThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ParallelCMSThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ParallelCMSThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  // "initiatingOccupancy" is the occupancy ratio at which we trigger
  // a new collection cycle. Unless explicitly specified via
  // CMSTriggerRatio, it is calculated by:
  //   Let "f" be MinHeapFreeRatio in
  //
  //    initiatingOccupancy = 100-f +
  //                          f * (CMSTriggerRatio/100)
  // That is, if we assume the heap is at its desired maximum occupancy at the
  // end of a collection, we let CMSTriggerRatio of the (purported) free
  // space be allocated before initiating a new collection cycle.
  if (CMSInitiatingOccupancyFraction > 0) {
    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
  } else {
    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
                           (double)(CMSTriggerRatio *
                                    MinHeapFreeRatio) / 100.0)
                           / 100.0;
  }
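
  // Worked example (illustrative, not in the original source): with
  // MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, the default works out to
  // ((100 - 40) + (80 * 40) / 100.0) / 100.0 = (60 + 32) / 100.0 = 0.92,
  // i.e. a new cycle is initiated once the generation is about 92% occupied.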
  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test. Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}
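
// Worked example (illustrative, with assumed values, not in the original
// source): with used() = 600M and MinHeapFreeRatio = 25, the desired
// capacity is 600M / (1 - 0.25) = 800M; if the current capacity is 700M,
// the expansion request is MAX2(800M - 700M, MinHeapDeltaBytes) = 100M
// of additional committed space.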

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);            // object is live
    _markBitMap.mark(start + 1);        // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1); // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass() == NULL, "_klass should be NULL");
}
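
// Illustrative sketch (not part of the original source) of the three bits
// set above for a directly allocated, possibly uninitialized block:
//
//   mark bit at:  start        start + 1             start + size - 1
//   meaning:      block live   maybe uninitialized   last word of block
//
// Together these let the sweeper recognize and step over the whole block
// even before the mutator has installed the object's klass pointer.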

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
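
// Worked example (illustrative, with assumed values, not in the original
// source): with 1000 free words in eden and a duty cycle of 30,
// duty_cycle_words = 300 and offset_words = (1000 - 300) / 2 = 350, so the
// limits become top() + 350 and end() - 350. The CMS incremental thread
// therefore runs while mutator allocation passes through the middle 300
// words, i.e. for 30% of the remaining eden space.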

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size, ref);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size, ref);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                                        HeapWord* top,
                                                        size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }
  // Restore the mark word copied above.
  obj->set_mark(m);
  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  // Finally, install the klass pointer.
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}
|
|
1334 |
|
|
void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
                       HeapWord* obj, size_t word_sz) {
  // CMS does not support promotion undo.
  ShouldNotReachHere();
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire();
#if CFLS_LAB_REFILL_STATS
  if (thread_num == 0) {
    _cmsSpace->print_par_alloc_stats();
  }
#endif
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

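// Taken together, par_promote(), par_promote_alloc_done() and
// par_oop_since_save_marks_iterate_done() form the per-worker protocol a
// parallel young-gen collection is expected to follow (a summary drawn
// from the code above, not a normative statement): each worker promotes
// objects via par_promote(), retires its local allocation buffer in
// par_promote_alloc_done(), and finally restores the displaced mark
// words of tracked promotees during the promoted-oops iteration.
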
// XXXPERM
bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             " gc request");
    }
    return true;
  }

  // For debugging purposes, change the type of collection.
  // If the rotation is not on the concurrent collection
  // type, don't start a concurrent collection.
  NOT_PRODUCT(
    if (RotateCMSCollectionTypes &&
        (_cmsGen->debug_collection_type() !=
         ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
      assert(_cmsGen->debug_collection_type() !=
             ConcurrentMarkSweepGeneration::Unknown_collection_type,
             "Bad cms collection type");
      return false;
    }
  )

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr("");
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
                           stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
                           _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if either the perm gen or
  // old gen want a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this.
  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  if (cms_should_unload_classes() &&
      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS perm gen initiated");
    }
    return true;
  }

  return false;
}

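// In summary (a reading of the code above, not an independent spec), a
// background cycle starts for one of three reasons: an explicit
// ExplicitGCInvokesConcurrent full-gc request, statistics that predict
// the CMS generation will fill before a cycle could complete (or a
// bootstrap collection while those statistics are not yet valid), or a
// per-generation occupancy/expansion trigger evaluated by the
// shouldConcurrentCollect(double) overload defined below.
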
// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
  _permGen->clear_expansion_cause();
}

bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
  double initiatingOccupancy) {
  // We should be conservative in starting a collection cycle.  To
  // start too eagerly runs the risk of collecting too often in the
  // extreme.  To collect too rarely falls back on full collections,
  // which works, even if not optimum in terms of concurrent work.
  // As a workaround for too eagerly collecting, use the flag
  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
  // giving the user an easily understandable way of controlling the
  // collections.
  // We want to start a new collection cycle if any of the following
  // conditions hold:
  // . our current occupancy exceeds the initiating occupancy, or
  // . we recently needed to expand and have not, since that expansion,
  //   collected, or
  // . we are not using adaptive free lists and linear allocation is
  //   going to fail, or
  // . (for old gen) incremental collection has already failed or
  //   may soon fail in the near future as we may not be able to absorb
  //   promotions.
  assert_lock_strong(freelistLock());

  if (occupancy() > initiatingOccupancy) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
        short_name(), occupancy(), initiatingOccupancy);
    }
    return true;
  }
  if (UseCMSInitiatingOccupancyOnly) {
    return false;
  }
  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because expanded for allocation ",
        short_name());
    }
    return true;
  }
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
        short_name());
    }
    return true;
  }
  if (!_cmsSpace->adaptive_freelists() &&
      _cmsSpace->linearAllocationWouldFail()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because of linAB ",
        short_name());
    }
    return true;
  }
  return false;
}

void ConcurrentMarkSweepGeneration::collect(bool   full,
                                            bool   clear_all_soft_refs,
                                            size_t size,
                                            bool   tlab)
{
  collector()->collect(full, clear_all_soft_refs, size, tlab);
}

void CMSCollector::collect(bool   full,
                           bool   clear_all_soft_refs,
                           size_t size,
                           bool   tlab)
{
  if (!UseCMSCollectionPassing && _collectorState > Idling) {
    // For debugging purposes skip the collection if the state
    // is not currently idle
    if (TraceCMSState) {
      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
        Thread::current(), full, _collectorState);
    }
    return;
  }

  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker.is_active(), "Can't be called otherwise");
  // But I am not placing that assert here to allow future
  // generality in invoking this interface.
  if (GC_locker::is_active()) {
    // A consistency test for GC_locker
    assert(GC_locker::needs_gc(), "Should have been set already");
    // Skip this foreground collection, instead
    // expanding the heap if necessary.
    // Need the free list locks for the call to free() in compute_new_size()
    compute_new_size();
    return;
  }
  acquire_control_and_collect(full, clear_all_soft_refs);
  _full_gcs_since_conc_gc++;
}

void CMSCollector::request_full_gc(unsigned int full_gc_count) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  unsigned int gc_count = gch->total_full_collections();
  if (gc_count == full_gc_count) {
    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
    _full_gc_requested = true;
    CGC_lock->notify();   // nudge CMS thread
  }
}

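// A note on request_full_gc() (an observation about the code above): it
// is the hook through which an external full-gc request -- for instance,
// a System.gc() call when ExplicitGCInvokesConcurrent is set, per the
// assert in shouldConcurrentCollect() above -- reaches the CMS thread.
// The gc-count comparison keeps a stale request from being registered
// when a full collection has already started since the caller sampled
// the count.
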
// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
// When a background collection is active,
// the foreground collector may need to take over (preempt) and
// synchronously complete an ongoing collection. Depending on the
// frequency of the background collections and the heap usage
// of the application, this preemption can be seldom or frequent.
// There are only certain
// points in the background collection that the "collection-baton"
// can be passed to the foreground collector.
//
// The foreground collector will wait for the baton before
// starting any part of the collection.  The foreground collector
// will only wait at one location.
//
// The background collector will yield the baton before starting a new
// phase of the collection (e.g., before initial marking, marking from roots,
// precleaning, final re-mark, sweep etc.)  This is normally done at the head
// of the loop which switches the phases.  The background collector does some
// of the phases (initial mark, final re-mark) with the world stopped.
// Because of locking involved in stopping the world,
// the foreground collector should not block waiting for the background
// collector when it is doing a stop-the-world phase.  The background
// collector will yield the baton at an additional point just before
// it enters a stop-the-world phase.  Once the world is stopped, the
// background collector checks the phase of the collection.  If the
// phase has not changed, it proceeds with the collection.  If the
// phase has changed, it skips that phase of the collection.  See
// the comments on the use of the Heap_lock in collect_in_background().
//
// Variables used in baton passing.
//   _foregroundGCIsActive - Set to true by the foreground collector when
//      it wants the baton.  The foreground clears it when it has finished
//      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
//   CGC_lock - monitor used to protect access to the above variables
//      and to notify the foreground and background collectors.
//   _collectorState - current state of the CMS collection.
//
// The foreground collector
//   acquires the CGC_lock
//   sets _foregroundGCIsActive
//   waits on the CGC_lock for _foregroundGCShouldWait to be false
//     various locks acquired in preparation for the collection
//     are released so as not to block the background collector
//     that is in the midst of a collection
//   proceeds with the collection
//   clears _foregroundGCIsActive
//   returns
//
// The background collector in a loop iterating on the phases of the
//      collection
//   acquires the CGC_lock
//   sets _foregroundGCShouldWait
//   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies _CGC_lock
//     waits on _CGC_lock for _foregroundGCIsActive to become false
//     and exits the loop.
//   otherwise
//     proceed with that phase of the collection
//     if the phase is a stop-the-world phase,
//       yield the baton once more just before enqueueing
//       the stop-world CMS operation (executed by the VM thread).
//   returns after all phases of the collection are done
//

void CMSCollector::acquire_control_and_collect(bool full,
                                               bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(!Thread::current()->is_ConcurrentGC_thread(),
         "shouldn't try to acquire control from self!");

  // Start the protocol for acquiring control of the
  // collection from the background collector (aka CMS thread).
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");
  // Remember the possibly interrupted state of an ongoing
  // concurrent collection
  CollectorState first_state = _collectorState;

  // Signal to a possibly ongoing concurrent collection that
  // we want to do a foreground collection.
  _foregroundGCIsActive = true;

  // Disable incremental mode during a foreground collection.
  ICMSDisabler icms_disabler;

  // release locks and wait for a notify from the background collector
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
  assert_lock_strong(bitMapLock());
  // We need to lock the Free list lock for the space that we are
  // currently collecting.
  assert(haveFreelistLocks(), "Must be holding free list locks");
  bitMapLock()->unlock();
  releaseFreelistLocks();
  {
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    if (_foregroundGCShouldWait) {
      // We are going to be waiting for action for the CMS thread;
      // it had better not be gone (for instance at shutdown)!
      assert(ConcurrentMarkSweepThread::cmst() != NULL,
             "CMS thread must be running");
      // Wait here until the background collector gives us the go-ahead
      ConcurrentMarkSweepThread::clear_CMS_flag(
        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
      // Get a possibly blocked CMS thread going:
      //   Note that we set _foregroundGCIsActive true above,
      //   without protection of the CGC_lock.
      CGC_lock->notify();
      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
             "Possible deadlock");
      while (_foregroundGCShouldWait) {
        // wait for notification
        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
        // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
      }
      ConcurrentMarkSweepThread::set_CMS_flag(
        ConcurrentMarkSweepThread::CMS_vm_has_token);
    }
  }
  // The CMS_token is already held.  Get back the other locks.
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");
  getFreelistLocks();
  bitMapLock()->lock_without_safepoint_check();
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
      INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
  }

  // Check if we need to do a compaction, or if not, whether
  // we need to start the mark-sweep from scratch.
  bool should_compact    = false;
  bool should_start_over = false;
  decide_foreground_collection_type(clear_all_soft_refs,
    &should_compact, &should_start_over);

  NOT_PRODUCT(
    if (RotateCMSCollectionTypes) {
      if (_cmsGen->debug_collection_type() ==
          ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
        should_compact = true;
      } else if (_cmsGen->debug_collection_type() ==
                 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
        should_compact = false;
      }
    }
  )

  if (PrintGCDetails && first_state > Idling) {
    GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
    if (GCCause::is_user_requested_gc(cause) ||
        GCCause::is_serviceability_requested_gc(cause)) {
      gclog_or_tty->print(" (concurrent mode interrupted)");
    } else {
      gclog_or_tty->print(" (concurrent mode failure)");
    }
  }

  if (should_compact) {
    // If the collection is being acquired from the background
    // collector, there may be references on the discovered
    // references lists that have NULL referents (being those
    // that were concurrently cleared by a mutator) or
    // that are no longer active (having been enqueued concurrently
    // by the mutator).
    // Scrub the list of those references because Mark-Sweep-Compact
    // code assumes referents are not NULL and that all discovered
    // Reference objects are active.
    ref_processor()->clean_up_discovered_references();

    do_compaction_work(clear_all_soft_refs);

    // Has the GC time limit been exceeded?
    check_gc_time_limit();

  } else {
    do_mark_sweep_work(clear_all_soft_refs, first_state,
      should_start_over);
  }
  // Reset the expansion cause, now that we just completed
  // a collection cycle.
  clear_expansion_cause();
  _foregroundGCIsActive = false;
  return;
}

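// A note on the wait loop in acquire_control_and_collect() (an
// observation about the code above, not an additional requirement): the
// VM thread gives up the CMS token and has already dropped the bitmap
// and free list locks before blocking on the CGC_lock, so the background
// collector can reach one of its yield points; once
// _foregroundGCShouldWait is cleared, the VM thread re-claims the token
// and then re-acquires the locks in the reverse of the order in which it
// released them.
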
void CMSCollector::check_gc_time_limit() {

  // Ignore explicit GC's.  Exiting here does not set the flag and
  // does not reset the count.  Updating of the averages for system
  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
  if (GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    return;
  }

  // Calculate the fraction of the CMS generation that was freed during
  // the last collection.
  // Only consider the STW compacting cost for now.
  //
  // Note that the gc time limit test only works for the collections
  // of the young gen + tenured gen and not for collections of the
  // permanent gen.  That is because the calculation of the space
  // freed by the collection is the free space in the young gen +
  // tenured gen.

  double fraction_free =
    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
  if ((100.0 * size_policy()->compacting_gc_cost()) >
         ((double) GCTimeLimit) &&
      ((fraction_free * 100) < GCHeapFreeLimit)) {
    size_policy()->inc_gc_time_limit_count();
    if (UseGCOverheadLimit &&
        (size_policy()->gc_time_limit_count() >
         AdaptiveSizePolicyGCTimeLimitThreshold)) {
      size_policy()->set_gc_time_limit_exceeded(true);
      // Avoid consecutive OOM due to the gc time limit by resetting
      // the counter.
      size_policy()->reset_gc_time_limit_count();
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
                               "of %d%%", GCTimeLimit);
      }
    } else {
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("      GC would exceed overhead limit "
                               "of %d%%", GCTimeLimit);
      }
    }
  } else {
    size_policy()->reset_gc_time_limit_count();
  }
}

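// Worked example for the test above (illustrative values, not
// necessarily the product defaults): with GCTimeLimit = 98 and
// GCHeapFreeLimit = 2, the limit counter is bumped only when
// compacting_gc_cost() exceeds 0.98 -- more than 98% of recent time
// spent in compacting collections -- while fraction_free is below 0.02,
// i.e. less than 2% of the CMS generation is free afterwards.  Only once
// the counter exceeds AdaptiveSizePolicyGCTimeLimitThreshold on
// consecutive qualifying collections (the counter is reset whenever a
// collection does not qualify) is gc_time_limit_exceeded set.
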
// Resize the perm generation and the tenured generation
// after obtaining the free list locks for the
// two generations.
void CMSCollector::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);
  FreelistLocker z(this);
  _permGen->compute_new_size();
  _cmsGen->compute_new_size();
}

// A work method used by foreground collection to determine
// what type of collection (compacting or not, continuing or fresh)
// it should do.
// NOTE: the intent is to make UseCMSCompactAtFullCollection
// and CMSCompactWhenClearAllSoftRefs the default in the future
// and do away with the flags after a suitable period.
void CMSCollector::decide_foreground_collection_type(
  bool clear_all_soft_refs, bool* should_compact,
  bool* should_start_over) {
  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  // flag is set, and we have either requested a System.gc() or
  // the number of full gc's since the last concurrent cycle
  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
  // or if an incremental collection has failed
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "You may want to check the correctness of the following");
  // Inform cms gen if this was due to partial collection failing.
  // The CMS gen may use this fact to determine its expansion policy.
  if (gch->incremental_collection_will_fail()) {
    assert(!_cmsGen->incremental_collection_failed(),
           "Should have been noticed, reacted to and cleared");
    _cmsGen->set_incremental_collection_failed();
  }
  *should_compact =
    UseCMSCompactAtFullCollection &&
    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
     GCCause::is_user_requested_gc(gch->gc_cause()) ||
     gch->incremental_collection_will_fail());
  *should_start_over = false;
  if (clear_all_soft_refs && !*should_compact) {
    // We are about to do a last ditch collection attempt
    // so it would normally make sense to do a compaction
    // to reclaim as much space as possible.
    if (CMSCompactWhenClearAllSoftRefs) {
      // Default: The rationale is that in this case either
      // we are past the final marking phase, in which case
      // we'd have to start over, or so little has been done
      // that there's little point in saving that work. Compaction
      // appears to be the sensible choice in either case.
      *should_compact = true;
    } else {
      // We have been asked to clear all soft refs, but not to
      // compact.  Make sure that we aren't past the final checkpoint
      // phase, for that is where we process soft refs.  If we are already
      // past that phase, we'll need to redo the refs discovery phase and
      // if necessary clear soft refs that weren't previously
      // cleared.  We do so by remembering the phase in which
      // we came in, and if we are past the refs processing
      // phase, we'll choose to just redo the mark-sweep
      // collection from scratch.
      if (_collectorState > FinalMarking) {
        // We are past the refs processing phase;
        // start over and do a fresh synchronous CMS cycle
        _collectorState = Resetting; // skip to reset to start new cycle
        reset(false /* == !asynch */);
        *should_start_over = true;
      } // else we can continue a possibly ongoing current cycle
    }
  }
}

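// For example (an illustration of the policy above, not new behavior):
// with UseCMSCompactAtFullCollection set, once _full_gcs_since_conc_gc
// reaches CMSFullGCsBeforeCompaction the next foreground collection
// compacts, as does any user-requested (System.gc()) collection or one
// taken when an incremental collection is expected to fail; otherwise a
// plain mark-sweep is done, unless CMSCompactWhenClearAllSoftRefs forces
// compaction on a clear-all-soft-refs ("last ditch") attempt.
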
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
      "collections passed to foreground collector", _full_gcs_since_conc_gc);
  }

  // Sample collection interval time and reset for collection pause.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator x(ref_processor(), new_span);

  // Temporarily, clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);

  // Temporarily make reference _processing_ single threaded (non-MT).
  ReferenceProcessorMTProcMutator z(ref_processor(), false);

  // Temporarily make refs discovery atomic
  ReferenceProcessorAtomicMutator w(ref_processor(), true);

  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are assuming the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
    "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();

  // We must adjust the allocation statistics being maintained
  // in the free list space.  We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(_sweep_timer.is_active(), "We should never see the timer inactive");
  _sweep_timer.stop();
  // Note that we do not use this sample to update the _sweep_estimate.
  _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
                                          _sweep_estimate.padded_average());

  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
    ref_processor(), clear_all_soft_refs);
#ifdef ASSERT
  CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
  size_t free_size = cms_space->free();
  assert(free_size ==
         pointer_delta(cms_space->end(), cms_space->compaction_top())
         * HeapWordSize,
    "All the free space should be compacted into one chunk at top");
  assert(cms_space->dictionary()->totalChunkSize(
    debug_only(cms_space->freelistLock())) == 0 ||
    cms_space->totalSizeInIndexedFreeLists() == 0,
    "All the free space should be in a single chunk");
  size_t num = cms_space->totalCount();
  assert((free_size == 0 && num == 0) ||
         (free_size > 0  && (num == 1 || num == 2)),
         "There should be at most 2 free chunks after compaction");
#endif // ASSERT
  _collectorState = Resetting;
  assert(_restart_addr == NULL,
         "Should have been NULL'd before baton was passed");
  reset(false /* == !asynch */);
  _cmsGen->reset_after_compaction();

  if (verifying() && !cms_should_unload_classes()) {
    perm_gen_verify_bit_map()->clear_all();
  }

  // Clear any data recorded in the PLAB chunk arrays.
  if (_survivor_plab_array != NULL) {
    reset_survivor_plab_arrays();
  }

  // Adjust the per-size allocation stats for the next epoch.
  _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
  // Restart the "sweep timer" for next epoch.
  _sweep_timer.reset();
  _sweep_timer.start();

  // Sample collection pause time and reset for collection interval.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_end(gch->gc_cause());
  }

  // For a mark-sweep-compact, compute_new_size() will be called
  // in the heap's do_collection() method.
}

// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  CollectorState first_state, bool should_start_over) {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
      "collector with count %d",
      _full_gcs_since_conc_gc);
  }
  switch (_collectorState) {
    case Idling:
      if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should
        // be restarted from scratch; start the cycle.
        _collectorState = InitialMarking;
      }
      // If first_state was not Idling, then a background GC
      // was in progress and has now finished.  No need to do it
      // again.  Leave the state as Idling.
      break;
    case Precleaning:
      // In the foreground case don't do the precleaning since
      // it is not done concurrently and there is extra work
      // required.
      _collectorState = FinalMarking;
  }
  if (PrintGCDetails &&
      (_collectorState > Idling ||
       !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
    gclog_or_tty->print(" (concurrent mode failure)");
  }
  collect_in_foreground(clear_all_soft_refs);

  // For a mark-sweep, compute_new_size() will be called
  // in the heap's do_collection() method.
}

void CMSCollector::getFreelistLocks() const {
  // Get locks for all free lists in all generations that this
  // collector is responsible for
  _cmsGen->freelistLock()->lock_without_safepoint_check();
  _permGen->freelistLock()->lock_without_safepoint_check();
}

void CMSCollector::releaseFreelistLocks() const {
  // Release locks for all free lists in all generations that this
  // collector is responsible for
  _cmsGen->freelistLock()->unlock();
  _permGen->freelistLock()->unlock();
}

bool CMSCollector::haveFreelistLocks() const {
  // Check locks for all free lists in all generations that this
  // collector is responsible for
  assert_lock_strong(_cmsGen->freelistLock());
  assert_lock_strong(_permGen->freelistLock());
  PRODUCT_ONLY(ShouldNotReachHere());
  return true;
}

// A utility class that is used by the CMS collector to
// temporarily "release" the foreground collector from its
// usual obligation to wait for the background collector to
// complete an ongoing phase before proceeding.
class ReleaseForegroundGC: public StackObj {
 private:
  CMSCollector* _c;
 public:
  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    // allow a potentially blocked foreground collector to proceed
    _c->_foregroundGCShouldWait = false;
    if (_c->_foregroundGCIsActive) {
      CGC_lock->notify();
    }
    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "Possible deadlock");
  }

  ~ReleaseForegroundGC() {
    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    _c->_foregroundGCShouldWait = true;
  }
};

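// Typical use of ReleaseForegroundGC is as a scoped guard around a
// blocking step, as in the InitialMarking and FinalMarking cases of
// collect_in_background() below: the constructor drops
// _foregroundGCShouldWait (waking a waiting foreground collector, if
// any) for the duration of the enclosed VMThread::execute() call, and
// the destructor re-asserts it.
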
// There are separate collect_in_background and collect_in_foreground because of
// the different locking requirements of the background collector and the
// foreground collector.  There was originally an attempt to share
// one "collect" method between the background collector and the foreground
// collector but the if-then-else required made it cleaner to have
// separate methods.
void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
  assert(Thread::current()->is_ConcurrentGC_thread(),
    "A CMS asynchronous collection is only allowed on a CMS thread.");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  {
    bool safepoint_check = Mutex::_no_safepoint_check_flag;
    MutexLockerEx hl(Heap_lock, safepoint_check);
    MutexLockerEx x(CGC_lock, safepoint_check);
    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
      // The foreground collector is active or we're
      // not using asynchronous collections.  Skip this
      // background collection.
      assert(!_foregroundGCShouldWait, "Should be clear");
      return;
    } else {
      assert(_collectorState == Idling, "Should be idling before start.");
      _collectorState = InitialMarking;
      // Reset the expansion cause, now that we are about to begin
      // a new cycle.
      clear_expansion_cause();
    }
    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
    // This controls class unloading in response to an explicit gc request.
    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
    // we will unload classes even if CMSClassUnloadingEnabled is not set.
    // See CR 6541037 and related CRs.
    _unload_classes = _full_gc_requested                      // ... for this cycle
                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
    _full_gc_requested = false;           // acks all outstanding full gc requests
    // Signal that we are about to start a collection
    gch->increment_total_full_collections();  // ... starting a collection cycle
    _collection_count_start = gch->total_full_collections();
  }

  // Used for PrintGC
  size_t prev_used;
  if (PrintGC && Verbose) {
    prev_used = _cmsGen->used(); // XXXPERM
  }

  // The change of the collection state is normally done at this level;
  // the exceptions are phases that are executed while the world is
  // stopped.  For those phases the change of state is done while the
  // world is stopped.  For baton passing purposes this allows the
  // background collector to finish the phase and change state atomically.
  // The foreground collector cannot wait on a phase that is done
  // while the world is stopped because the foreground collector already
  // has the world stopped and would deadlock.
  while (_collectorState != Idling) {
    if (TraceCMSState) {
      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
        Thread::current(), _collectorState);
    }
    // The foreground collector
    //   holds the Heap_lock throughout its collection.
    //   holds the CMS token (but not the lock)
    //     except while it is waiting for the background collector to yield.
    //
    // The foreground collector should be blocked (not for long)
    //   if the background collector is about to start a phase
    //   executed with world stopped.  If the background
    //   collector has already started such a phase, the
    //   foreground collector is blocked waiting for the
    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
    //   are executed in the VM thread.
    //
    // The locking order is
    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
    //   Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
    //   CMS token  (claimed in
    //                stop_world_and_do() -->
    //                  safepoint_synchronize() -->
    //                    CMSThread::synchronize())

    {
      // Check if the FG collector wants us to yield.
      CMSTokenSync x(true); // is cms thread
      if (waitForForegroundGC()) {
        // We yielded to a foreground GC, nothing more to be
        // done this round.
        assert(_foregroundGCShouldWait == false, "We set it to false in "
               "waitForForegroundGC()");
        if (TraceCMSState) {
          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
            " exiting collection CMS state %d",
            Thread::current(), _collectorState);
        }
        return;
      } else {
        // The background collector can run but check to see if the
        // foreground collector has done a collection while the
        // background collector was waiting to get the CGC_lock
        // above.  If yes, break so that _foregroundGCShouldWait
        // is cleared before returning.
        if (_collectorState == Idling) {
          break;
        }
      }
    }

    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
      "should be waiting");

    switch (_collectorState) {
      case InitialMarking:
        {
          ReleaseForegroundGC x(this);
          stats().record_cms_begin();

          VM_CMS_Initial_Mark initial_mark_op(this);
          VMThread::execute(&initial_mark_op);
        }
        // The collector state may be any legal state at this point
        // since the background collector may have yielded to the
        // foreground collector.
        break;
      case Marking:
        // initial marking in checkpointRootsInitialWork has been completed
        if (markFromRoots(true)) { // we were successful
          assert(_collectorState == Precleaning, "Collector state should "
            "have changed");
        } else {
          assert(_foregroundGCIsActive, "Internal state inconsistency");
        }
        break;
      case Precleaning:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_precleaning_begin();
        }
        // marking from roots in markFromRoots has been completed
        preclean();
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_precleaning_end();
        }
        assert(_collectorState == AbortablePreclean ||
               _collectorState == FinalMarking,
               "Collector state should have changed");
        break;
      case AbortablePreclean:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
        }
        abortable_preclean();
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_precleaning_end();
        }
        assert(_collectorState == FinalMarking, "Collector state should "
          "have changed");
        break;
      case FinalMarking:
        {
          ReleaseForegroundGC x(this);

          VM_CMS_Final_Remark final_remark_op(this);
          VMThread::execute(&final_remark_op);
        }
        assert(_foregroundGCShouldWait, "block post-condition");
        break;
      case Sweeping:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_sweeping_begin();
        }
        // final marking in checkpointRootsFinal has been completed
        sweep(true);
        assert(_collectorState == Resizing, "Collector state change "
          "to Resizing must be done under the free_list_lock");
        _full_gcs_since_conc_gc = 0;

        // Stop the timers for adaptive size policy for the concurrent phases
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_sweeping_end();
          size_policy()->concurrent_phases_end(gch->gc_cause(),
                                               gch->prev_gen(_cmsGen)->capacity(),
                                               _cmsGen->free());
        }
        // Note: no break -- control falls through into the Resizing case.

      case Resizing: {
        // Sweeping has been completed...
        // At this point the background collection has completed.
        // Don't move the call to compute_new_size() down
        // into code that might be executed if the background
        // collection was preempted.
        {
          ReleaseForegroundGC x(this);   // unblock FG collection
          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
          CMSTokenSync        z(true);   // not strictly needed.
          if (_collectorState == Resizing) {
            compute_new_size();
            _collectorState = Resetting;
          } else {
            assert(_collectorState == Idling, "The state should only change"
                   " because the foreground collector has finished the collection");
          }
        }
        break;
      }
      case Resetting:
        // CMS heap resizing has been completed
        reset(true);
        assert(_collectorState == Idling, "Collector state should "
          "have changed");
        stats().record_cms_end();
        // Don't move the concurrent_phases_end() and compute_new_size()
        // calls to here because a preempted background collection
        // has its state set to "Resetting".
        break;
      case Idling:
      default:
        ShouldNotReachHere();
        break;
    }
    if (TraceCMSState) {
      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
        Thread::current(), _collectorState);
    }
    assert(_foregroundGCShouldWait, "block post-condition");
  }

  // Should this be in gc_epilogue?
  collector_policy()->counters()->update_counters();

  {
    // Clear _foregroundGCShouldWait and, in the event that the
    // foreground collector is waiting, notify it, before
    // returning.
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    _foregroundGCShouldWait = false;
    if (_foregroundGCIsActive) {
      CGC_lock->notify();
    }
    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "Possible deadlock");
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
      " exiting collection CMS state %d",
      Thread::current(), _collectorState);
  }
  if (PrintGC && Verbose) {
    _cmsGen->print_heap_change(prev_used);
  }
}

void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
         "Foreground collector should be waiting, not executing");
  assert(Thread::current()->is_VM_thread(), "A foreground collection "
    "may only be done by the VM Thread with the world stopped");
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");

  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
    true, gclog_or_tty);)
  if (UseAdaptiveSizePolicy) {
    size_policy()->ms_collection_begin();
  }
  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);

  HandleMark hm;  // Discard invalid handles created during verification

  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    Universe::verify(true);
  }

  bool init_mark_was_synchronous = false; // until proven otherwise
  while (_collectorState != Idling) {
    if (TraceCMSState) {
      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
        Thread::current(), _collectorState);
    }
    switch (_collectorState) {
      case InitialMarking:
        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
        checkpointRootsInitial(false);
        assert(_collectorState == Marking, "Collector state should have changed"
          " within checkpointRootsInitial()");
        break;
      case Marking:
        // initial marking in checkpointRootsInitialWork has been completed
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before initial mark: ");
          Universe::verify(true);
        }
        {
          bool res = markFromRoots(false);
          assert(res && _collectorState == FinalMarking, "Collector state should "
            "have changed");
          break;
        }
      case FinalMarking:
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before re-mark: ");
          Universe::verify(true);
        }
        checkpointRootsFinal(false, clear_all_soft_refs,
                             init_mark_was_synchronous);
        assert(_collectorState == Sweeping, "Collector state should not "
          "have changed within checkpointRootsFinal()");
        break;
      case Sweeping:
        // final marking in checkpointRootsFinal has been completed
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before sweep: ");
          Universe::verify(true);
        }
        sweep(false);
        assert(_collectorState == Resizing, "Incorrect state");
        break;
      case Resizing: {
        // Sweeping has been completed; the actual resize in this case
        // is done separately; nothing to be done in this state.
        _collectorState = Resetting;
        break;
      }
      case Resetting:
        // The heap has been resized.
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before reset: ");
          Universe::verify(true);
        }
        reset(false);
        assert(_collectorState == Idling, "Collector state should "
          "have changed");
        break;
      case Precleaning:
      case AbortablePreclean:
        // Elide the preclean phase
        _collectorState = FinalMarking;
        break;
      default:
        ShouldNotReachHere();
    }
    if (TraceCMSState) {
      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
        Thread::current(), _collectorState);
    }
  }

  if (UseAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    size_policy()->ms_collection_end(gch->gc_cause());
  }

  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    Universe::verify(true);
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
      " exiting collection CMS state %d",
      Thread::current(), _collectorState);
  }
}

bool CMSCollector::waitForForegroundGC() {
  bool res = false;
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  _foregroundGCShouldWait = true;
  if (_foregroundGCIsActive) {
    // The background collector yields to the
    // foreground collector and returns a value
    // indicating that it has yielded.  The foreground
    // collector can proceed.
    res = true;
    _foregroundGCShouldWait = false;
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
    // Get a possibly blocked foreground thread going
    CGC_lock->notify();
    if (TraceCMSState) {
      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
        Thread::current(), _collectorState);
    }
    while (_foregroundGCIsActive) {
      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
    }
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
      Thread::current(), _collectorState);
  }
  return res;
}

// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector.  It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector.  In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for.  A similar
// comment applies to the gc_epilogue()'s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
void CMSCollector::gc_prologue(bool full) {
  // Call gc_prologue_work() for each CMSGen and PermGen that
  // we are responsible for.

  // The following locking discipline assumes that we are only called
  // when the world is stopped.
  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");

  // The CMSCollector prologue must call the gc_prologues for the
  // "generations" (including PermGen if any) that it's responsible
  // for.

  assert(   Thread::current()->is_VM_thread()
         || (   CMSScavengeBeforeRemark
             && Thread::current()->is_ConcurrentGC_thread()),
         "Incorrect thread type for prologue execution");

  if (_between_prologue_and_epilogue) {
    // We have already been invoked; this is a gc_prologue delegation
    // from yet another CMS generation that we are responsible for, just
    // ignore it since all relevant work has already been done.
    return;
  }

  // set a bit saying prologue has been called; cleared in epilogue
  _between_prologue_and_epilogue = true;
  // Claim locks for common data structures, then call gc_prologue_work()
  // for each CMSGen and PermGen that we are responsible for.

  getFreelistLocks();   // gets free list locks on constituent spaces
  bitMapLock()->lock_without_safepoint_check();

  // Should call gc_prologue_work() for all cms gens we are responsible for
  bool registerClosure =    _collectorState >= Marking
                         && _collectorState < Sweeping;
  ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
                                               : &_modUnionClosure;
  _cmsGen->gc_prologue_work(full, registerClosure, muc);
  _permGen->gc_prologue_work(full, registerClosure, muc);

  if (!full) {
    stats().record_gc0_begin();
  }
}

2552 |
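// To make the invocation protocol above concrete, the prologue call chain
// looks roughly like this (a reading aid, not a trace):
//
//   GenCollectedHeap::gc_prologue(full)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)   // public entry
//          -> CMSCollector::gc_prologue(full)               // takes locks once
//               -> _cmsGen->gc_prologue_work(...)           // "local" work
//               -> _permGen->gc_prologue_work(...)
//
// A second delegation arriving from the other generation finds
// _between_prologue_and_epilogue already set and returns immediately.
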
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  // Delegate to CMSCollector which knows how to coordinate between
  // this and any other CMS generations that it is responsible for
  // collecting.
  collector()->gc_prologue(full);
}

// This is a "private" interface for use by this generation's CMSCollector.
// Not to be called directly by any other entity (for instance,
// GenCollectedHeap, which calls the "public" gc_prologue method above).
void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
  bool registerClosure, ModUnionClosure* modUnionClosure) {
  assert(!incremental_collection_failed(), "Shouldn't be set yet");
  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
         "Should be NULL");
  if (registerClosure) {
    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
  }
  cmsSpace()->gc_prologue();
  // Clear stat counters
  NOT_PRODUCT(
    assert(_numObjectsPromoted == 0, "check");
    assert(_numWordsPromoted   == 0, "check");
    if (Verbose && PrintGC) {
      gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
                          SIZE_FORMAT" bytes concurrently",
        _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
    }
    _numObjectsAllocated = 0;
    _numWordsAllocated   = 0;
  )
}

void CMSCollector::gc_epilogue(bool full) {
  // The following locking discipline assumes that we are only called
  // when the world is stopped.
  assert(SafepointSynchronize::is_at_safepoint(),
         "world is stopped assumption");

  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
  // the blocks to be parsable. We also check here whether we need to nudge the
  // CMS collector thread to start a new cycle (if it's not already active).
  assert(   Thread::current()->is_VM_thread()
         || (   CMSScavengeBeforeRemark
             && Thread::current()->is_ConcurrentGC_thread()),
         "Incorrect thread type for epilogue execution");

  if (!_between_prologue_and_epilogue) {
    // We have already been invoked; this is a gc_epilogue delegation
    // from yet another CMS generation that we are responsible for, just
    // ignore it since all relevant work has already been done.
    return;
  }
  assert(haveFreelistLocks(), "must have freelist locks");
  assert_lock_strong(bitMapLock());

  _cmsGen->gc_epilogue_work(full);
  _permGen->gc_epilogue_work(full);

  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
    // in case sampling was not already enabled, enable it
    _start_sampling = true;
  }
  // reset _eden_chunk_array so sampling starts afresh
  _eden_chunk_index = 0;

  size_t cms_used  = _cmsGen->cmsSpace()->used();
  size_t perm_used = _permGen->cmsSpace()->used();

  // update performance counters - this uses a special version of
  // update_counters() that allows the utilization to be passed as a
  // parameter, avoiding multiple calls to used().
  //
  _cmsGen->update_counters(cms_used);
  _permGen->update_counters(perm_used);

  if (CMSIncrementalMode) {
    icms_update_allocation_limits();
  }

  bitMapLock()->unlock();
  releaseFreelistLocks();

  _between_prologue_and_epilogue = false;  // ready for next cycle
}

void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
  collector()->gc_epilogue(full);

  // Also reset promotion tracking in par gc thread states.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.stopTrackingPromotions();
    }
  }
}

void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
  assert(!incremental_collection_failed(), "Should have been cleared");
  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
  cmsSpace()->gc_epilogue();
  // Print stat counters
  NOT_PRODUCT(
    assert(_numObjectsAllocated == 0, "check");
    assert(_numWordsAllocated == 0, "check");
    if (Verbose && PrintGC) {
      gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
                          SIZE_FORMAT" bytes",
        _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
    }
    _numObjectsPromoted = 0;
    _numWordsPromoted   = 0;
  )

  if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the freelistLock,
    // so print this out before releasing the freeListLock.
    gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
                        contiguous_available());
  }
}

#ifndef PRODUCT
bool CMSCollector::have_cms_token() {
  Thread* thr = Thread::current();
  if (thr->is_VM_thread()) {
    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
  } else if (thr->is_ConcurrentGC_thread()) {
    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
  } else if (thr->is_GC_task_thread()) {
    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
           ParGCRareEvent_lock->owned_by_self();
  }
  return false;
}
#endif

// Check reachability of the given heap address in CMS generation,
// treating all other generations as roots.
bool CMSCollector::is_cms_reachable(HeapWord* addr) {
  // We could "guarantee" below, rather than assert, but I'll
  // leave these as "asserts" so that an adventurous debugger
  // could try this in the product build provided some subset of
  // the conditions were met, provided they were interested in the
  // results and knew that the computation below wouldn't interfere
  // with other concurrent computations mutating the structures
  // being read or written.
  assert(SafepointSynchronize::is_at_safepoint(),
         "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(), "Should hold cms token");
  assert(haveFreelistLocks(), "must hold free list locks");
  assert_lock_strong(bitMapLock());

  // Clear the marking bit map array before starting, but, just
  // for kicks, first report if the given address is already marked
  gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
                         _markBitMap.isMarked(addr) ? "" : " not");

  if (verify_after_remark()) {
    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
    bool result = verification_mark_bm()->isMarked(addr);
    gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
                           result ? "IS" : "is NOT");
    return result;
  } else {
    gclog_or_tty->print_cr("Could not compute result");
    return false;
  }
}

////////////////////////////////////////////////////////
// CMS Verification Support
////////////////////////////////////////////////////////
// Following the remark phase, the following invariant
// should hold -- each object in the CMS heap which is
// marked in markBitMap() should be marked in the verification_mark_bm().

class VerifyMarkedClosure: public BitMapClosure {
  CMSBitMap* _marks;
  bool       _failed;

 public:
  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}

  void do_bit(size_t offset) {
    HeapWord* addr = _marks->offsetToHeapWord(offset);
    if (!_marks->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
      _failed = true;
    }
  }

  bool failed() { return _failed; }
};

bool CMSCollector::verify_after_remark() {
  gclog_or_tty->print(" [Verifying CMS Marking... ");
  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  static bool init = false;

  assert(SafepointSynchronize::is_at_safepoint(),
         "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         " verification data structures");
  assert(_collectorState > Marking && _collectorState <= Sweeping,
         "Else marking info checked here may be obsolete");
  assert(haveFreelistLocks(), "must hold free list locks");
  assert_lock_strong(bitMapLock());


  // Allocate marking bit map if not already allocated
  if (!init) { // first time
    if (!verification_mark_bm()->allocate(_span)) {
      return false;
    }
    init = true;
  }

  assert(verification_mark_stack()->isEmpty(), "Should be empty");

  // Turn off refs discovery -- so we will be tracing through refs.
  // This is as intended, because by this time
  // GC must already have cleared any refs that need to be cleared,
  // and traced those that need to be marked; moreover,
  // the marking done here is not going to interfere in any
  // way with the marking information used by GC.
  NoRefDiscovery no_discovery(ref_processor());

  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

  // Clear any marks from a previous round
  verification_mark_bm()->clear_all();
  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
  assert(overflow_list_is_empty(), "overflow list should be empty");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  gch->save_marks();

  if (CMSRemarkVerifyVariant == 1) {
    // In this first variant of verification, we complete
    // all marking, then check if the new marks-vector is
    // a subset of the CMS marks-vector.
    verify_after_remark_work_1();
  } else if (CMSRemarkVerifyVariant == 2) {
    // In this second variant of verification, we flag an error
    // (i.e. an object reachable in the new marks-vector not reachable
    // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
    // presumably, a mutation to A failed to be picked up by preclean/remark?
    verify_after_remark_work_2();
  } else {
    warning("Unrecognized value %d for CMSRemarkVerifyVariant",
            CMSRemarkVerifyVariant);
  }
  gclog_or_tty->print(" done] ");
  return true;
}

void CMSCollector::verify_after_remark_work_1() {
  ResourceMark rm;
  HandleMark  hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Mark from roots one level into CMS
  MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // collecting perm gen
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                NULL, &notOlder);

  // Now mark from the roots
  assert(_revisitStack.isEmpty(), "Should be empty");
  MarkFromRootsClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), verification_mark_stack(), &_revisitStack,
    false /* don't yield */, true /* verifying */);
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();
  // Should reset the revisit stack above, since no class tree
  // surgery is forthcoming.
  _revisitStack.reset(); // throwing away all contents

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  if (vcl.failed()) {
    gclog_or_tty->print("Verification failed");
    Universe::heap()->print();
    fatal(" ... aborting");
  }
}

void CMSCollector::verify_after_remark_work_2() {
  ResourceMark rm;
  HandleMark  hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Mark from roots one level into CMS
  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                     markBitMap(), true /* nmethods */);
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // collecting perm gen
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                NULL, &notOlder);

  // Now mark from the roots
  assert(_revisitStack.isEmpty(), "Should be empty");
  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), markBitMap(), verification_mark_stack());
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();
  // Should reset the revisit stack above, since no class tree
  // surgery is forthcoming.
  _revisitStack.reset(); // throwing away all contents

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  assert(!vcl.failed(), "Else verification above should not have succeeded");
}

void ConcurrentMarkSweepGeneration::save_marks() {
  // delegate to CMS space
  cmsSpace()->save_marks();
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.startTrackingPromotions();
  }
}

bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  return cmsSpace()->no_allocs_since_save_marks();
}

#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
                                                                \
void ConcurrentMarkSweepGeneration::                            \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)

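// For illustration, applying the macro above to a (hypothetical) closure
// type ScanClosure with suffix _nv would expand to, roughly:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }
//
// ALL_SINCE_SAVE_MARKS_CLOSURES stamps out one such definition per
// (closure type, suffix) pair that it enumerates.
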
void
ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
{
  // Not currently implemented; need to do the following. -- ysr.
  // dld -- I think that is used for some sort of allocation profiler.  So it
  // really means the objects allocated by the mutator since the last
  // GC.  We could potentially implement this cheaply by recording only
  // the direct allocations in a side data structure.
  //
  // I think we probably ought not to be required to support these
  // iterations at any arbitrary point; I think there ought to be some
  // call to enable/disable allocation profiling in a generation/space,
  // and the iterator ought to return the objects allocated in the
  // gen/space since the enable call, or the last iterator call (which
  // will probably be at a GC.)  That way, for gens like CM&S that would
  // require some extra data structure to support this, we only pay the
  // cost when it's in use...
  cmsSpace()->object_iterate_since_last_GC(blk);
}

void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  cl->set_generation(this);
  younger_refs_in_space_iterate(_cmsSpace, cl);
  cl->reset_generation();
}

void
ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::oop_iterate(mr, cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::oop_iterate(mr, cl);
  }
}

void
ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::oop_iterate(cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::oop_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::object_iterate(cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::object_iterate(cl);
  }
}

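// A note on the three iterate methods above: they all follow the same
// lock-if-needed pattern -- if the calling thread already owns
// freelistLock() (e.g. GC code that claimed it in gc_prologue()), call
// straight through; otherwise take the lock, without a safepoint check,
// for the duration of the iteration.
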
void
ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
}

void
ConcurrentMarkSweepGeneration::post_compact() {
}

void
ConcurrentMarkSweepGeneration::prepare_for_verify() {
  // Fix the linear allocation blocks to look like free blocks.

  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->prepare_for_verify();
  } else {
    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->prepare_for_verify();
  }
}

void
ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->verify(false /* ignored */);
  } else {
    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->verify(false /* ignored */);
  }
}

void CMSCollector::verify(bool allow_dirty /* ignored */) {
  _cmsGen->verify(allow_dirty);
  _permGen->verify(allow_dirty);
}

#ifndef PRODUCT
bool CMSCollector::overflow_list_is_empty() const {
  assert(_num_par_pushes >= 0, "Inconsistency");
  if (_overflow_list == NULL) {
    assert(_num_par_pushes == 0, "Inconsistency");
  }
  return _overflow_list == NULL;
}

// The methods verify_work_stacks_empty() and verify_overflow_empty()
// merely consolidate assertion checks that appear to occur together frequently.
void CMSCollector::verify_work_stacks_empty() const {
  assert(_markStack.isEmpty(), "Marking stack should be empty");
  assert(overflow_list_is_empty(), "Overflow list should be empty");
}

void CMSCollector::verify_overflow_empty() const {
  assert(overflow_list_is_empty(), "Overflow list should be empty");
  assert(no_preserved_marks(), "No preserved marks");
}
#endif // PRODUCT

void CMSCollector::setup_cms_unloading_and_verification_state() {
  const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                             || VerifyBeforeExit;
  const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
                  | SharedHeap::SO_CodeCache;

  if (cms_should_unload_classes()) {   // Should unload classes this cycle
    remove_root_scanning_option(rso);  // Shrink the root set appropriately
    set_verifying(should_verify);      // Set verification state for this cycle
    return;                            // Nothing else needs to be done at this time
  }

  // Not unloading classes this cycle
  assert(!cms_should_unload_classes(), "Inconsistency!");
  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
    // We were not verifying, or we _were_ unloading classes in the last cycle,
    // AND some verification options are enabled this cycle; in this case,
    // we must make sure that the deadness map is allocated if not already so,
    // and cleared (if already allocated previously --
    // CMSBitMap::sizeInBits() is used to determine if it's allocated).
    if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
      if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
        warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
                "permanent generation verification disabled");
        return;  // Note that we leave verification disabled, so we'll retry this
                 // allocation next cycle. We _could_ remember this failure
                 // and skip further attempts and permanently disable verification
                 // attempts if that is considered more desirable.
      }
      assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
             "_perm_gen_ver_bit_map inconsistency?");
    } else {
      perm_gen_verify_bit_map()->clear_all();
    }
    // Include symbols, strings and code cache elements to prevent their resurrection.
    add_root_scanning_option(rso);
    set_verifying(true);
  } else if (verifying() && !should_verify) {
    // We were verifying, but some verification flags got disabled.
    set_verifying(false);
    // Exclude symbols, strings and code cache elements from root scanning to
    // reduce IM and RM pauses.
    remove_root_scanning_option(rso);
  }
}


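// Note on the rso bit-mask above: SO_Symbols, SO_Strings and SO_CodeCache
// are individual ScanningOption bits, so add_root_scanning_option(rso) and
// remove_root_scanning_option(rso) in effect set and clear those three
// bits in the collector's root-scanning mask, which the strong-root
// iterations consult to decide whether these otherwise-weak sources are
// treated as strong roots for the cycle.
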
#ifndef PRODUCT
HeapWord* CMSCollector::block_start(const void* p) const {
  const HeapWord* addr = (HeapWord*)p;
  if (_span.contains(p)) {
    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
      return _cmsGen->cmsSpace()->block_start(p);
    } else {
      assert(_permGen->cmsSpace()->is_in_reserved(addr),
             "Inconsistent _span?");
      return _permGen->cmsSpace()->block_start(p);
    }
  }
  return NULL;
}
#endif

HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool   tlab,
                                                   bool   parallel) {
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
         CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
  if (parallel) {
    return cmsSpace()->par_allocate(adj_word_sz);
  } else {
    return cmsSpace()->allocate(adj_word_sz);
  }
}

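// Two reading notes on expand_and_allocate() above: the optional
// os::sleep() gated by GCExpandToAllocateDelayMillis appears to be a
// test/diagnostic knob that widens the window between expansion and
// allocation for racing threads; and the request is passed through
// CompactibleFreeListSpace::adjustObjectSize() so the size actually
// allocated agrees with what the CMS space's allocator expects.
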
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
  CMSExpansionCause::Cause cause)
{
  assert_locked_or_safepoint(Heap_lock);

  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    size_t remaining_bytes = _virtual_space.uncommitted_size();
    if (remaining_bytes > 0) {
      success = grow_by(remaining_bytes);
    }
  }
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
  // remember why we expanded; this information is used
  // by shouldConcurrentCollect() when making decisions on whether to start
  // a new CMS cycle.
  if (success) {
    set_expansion_cause(cause);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("Expanded CMS gen for %s",
        CMSExpansionCause::to_string(cause));
    }
  }
}

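// Expansion policy of expand() above, in brief: first try the (page-
// aligned) expansion hint if it exceeds the (page-aligned) request, then
// the request itself, then whatever uncommitted space remains. For
// example, with bytes = 1M and expand_bytes = 64K, aligned_expand_bytes
// (64K) does not exceed aligned_bytes (1M), so the first grow_by()
// attempt is made directly with the full 1M request.
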
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
           CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}


bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand(refill_size_bytes, MinHeapDeltaBytes,
           CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}


void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}

bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    HeapWord* old_end = _cmsSpace->end();
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_cmsSpace->bottom(), new_word_size);
    _bts->resize(new_word_size);  // resize the block offset shared array
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Hmmmm... why doesn't CFLS::set_end verify locking?
    // This is quite ugly; FIX ME XXX
    _cmsSpace->assert_locked();
    _cmsSpace->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}

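// Ordering note for grow_by() above: the virtual space is committed
// first, then the block offset table and the barrier set's covered
// region are resized to the new committed size, and only then is the
// space's end() raised -- presumably so that no allocation can land in
// words for which the side data structures have not yet been resized.
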
bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(Heap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  // XXX Fix when compaction is implemented.
  warning("Shrinking of CMS not yet implemented");
  return;
}


// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
// phases.
class CMSPhaseAccounting: public StackObj {
 public:
  CMSPhaseAccounting(CMSCollector *collector,
                     const char *phase,
                     bool print_cr = true);
  ~CMSPhaseAccounting();

 private:
  CMSCollector *_collector;
  const char *_phase;
  elapsedTimer _wallclock;
  bool _print_cr;

 public:
  // Not MT-safe; so do not pass around these StackObj's
  // where they may be accessed by other threads.
  jlong wallclock_millis() {
    assert(_wallclock.is_active(), "Wall clock should not stop");
    _wallclock.stop();  // to record time
    jlong ret = _wallclock.milliseconds();
    _wallclock.start(); // restart
    return ret;
  }
};

CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr) {

  if (PrintCMSStatistics != 0) {
    _collector->resetYields();
  }
  if (PrintGCDetails && PrintGCTimeStamps) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
      _collector->cmsGen()->short_name(), _phase);
  }
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}

CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  _collector->stopTimer();
  _wallclock.stop();
  if (PrintGCDetails) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    if (PrintGCTimeStamps) {
      gclog_or_tty->stamp();
      gclog_or_tty->print(": ");
    }
    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                        _collector->cmsGen()->short_name(),
                        _phase, _collector->timerValue(), _wallclock.seconds());
    if (_print_cr) {
      gclog_or_tty->print_cr("");
    }
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                             _collector->yields());
    }
  }
}

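// Typical use of CMSPhaseAccounting (see e.g. markFromRoots() below):
// declare an instance on the stack for the duration of a concurrent
// phase --
//
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//
// -- the constructor logs the "-start" banner and starts both timers,
// and the destructor logs the elapsed CPU/wall times (and yield counts,
// under PrintCMSStatistics) when the phase scope is exited.
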
// CMS work

// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
void CMSCollector::checkpointRootsInitial(bool asynch) {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  ReferenceProcessor* rp = ref_processor();
  SpecializationStats::clear();
  assert(_restart_addr == NULL, "Control point invariant");
  if (asynch) {
    // acquire locks for subsequent manipulations
    MutexLockerEx x(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork(asynch);
    rp->verify_no_references_recorded();
    rp->enable_discovery(); // enable ("weak") refs discovery
    _collectorState = Marking;
  } else {
    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
    // which recognizes if we are a CMS generation, and doesn't try to turn on
    // discovery; verify that they aren't meddling.
    assert(!rp->discovery_is_atomic(),
           "incorrect setting of discovery predicate");
    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
           "ref discovery for this generation kind");
    // already have locks
    checkpointRootsInitialWork(asynch);
    rp->enable_discovery(); // now enable ("weak") refs discovery
    _collectorState = Marking;
  }
  SpecializationStats::print();
}

void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // If there has not been a GC[n-1] since last GC[n] cycle completed,
  // precede our marking with a collection of all
  // younger generations to keep floating garbage to a minimum.
  // XXX: we won't do this for now -- it's an optimization to be done later.

  // already have locks
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
    PrintGCDetails && Verbose, true, gclog_or_tty);)
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_begin();
  }

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }

  ResourceMark rm;
  HandleMark  hm;

  FalseClosure falseClosure;
  // In the case of a synchronous collection, we will elide the
  // remark step, so it's important to catch all the nmethod oops
  // in this step; hence the last argument to the constructor below.
  MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  verify_work_stacks_empty();
  verify_overflow_empty();

  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  gch->save_marks();

  // weak reference processing has not started yet.
  ref_processor()->set_enqueuing_is_done(false);

  {
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,   // younger gens are roots
                                  true,   // collecting perm gen
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  NULL, &notOlder);
  }

  // Clear mod-union table; it will be dirtied in the prologue of
  // CMS generation per each younger generation collection.

  assert(_modUnionTable.isAllClear(),
         "Was cleared in most recent final checkpoint phase"
         " or no bits are set in the gc_prologue before the start of the next "
         "subsequent marking phase.");

  // Temporarily disabled, since pre/post-consumption closures don't
  // care about precleaned cards
#if 0
  {
    MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
                             (HeapWord*)_virtual_space.high());
    _ct->ct_bs()->preclean_dirty_cards(mr);
  }
#endif

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
  }
  verify_overflow_empty();
}

bool CMSCollector::markFromRoots(bool asynch) {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
  assert(_collectorState == Marking, "inconsistent state?");
  check_correct_thread_executing();
  verify_overflow_empty();

  bool res;
  if (asynch) {

    // Start the timers for adaptive size policy for the concurrent phases
    // Do it here so that the foreground MS can use the concurrent
    // timer since a foreground MS might have the sweep done concurrently
    // or STW.
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_begin();
    }

    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by a younger generation collector.

    CMSTokenSyncWithLocks ts(true, bitMapLock());
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
    res = markFromRootsWork(asynch);
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL,  "foreground will restart from scratch");
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("bailing out to foreground collection");
      }
    }
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_end();
    }
  } else {
    assert(SafepointSynchronize::is_at_safepoint(),
           "inconsistent with asynch == false");
    if (UseAdaptiveSizePolicy) {
      size_policy()->ms_collection_marking_begin();
    }
    // already have locks
    res = markFromRootsWork(asynch);
    _collectorState = FinalMarking;
    if (UseAdaptiveSizePolicy) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      size_policy()->ms_collection_marking_end(gch->gc_cause());
    }
  }
  verify_overflow_empty();
  return res;
}

bool CMSCollector::markFromRootsWork(bool asynch) {
  // iterate over marked bits in bit map, doing a full scan and mark
  // from these roots using the following algorithm:
  // . if oop is to the right of the current scan pointer,
  //   mark corresponding bit (we'll process it later)
  // . else (oop is to left of current scan pointer)
  //   push oop on marking stack
  // . drain the marking stack

  // Note that when we do a marking step we need to hold the
  // bit map lock -- recall that direct allocation (by mutators)
  // and promotion (by younger generation collectors) is also
  // marking the bit map. [the so-called allocate live policy.]
  // Because the implementation of bit map marking is not
  // robust wrt simultaneous marking of bits in the same word,
  // we need to make sure that there is no such interference
  // between concurrent such updates.

  // already have locks
  assert_lock_strong(bitMapLock());

  // Clear the revisit stack, just in case there are any
  // obsolete contents from a short-circuited previous CMS cycle.
  _revisitStack.reset();
  verify_work_stacks_empty();
  verify_overflow_empty();
  assert(_revisitStack.isEmpty(), "tabula rasa");

  bool result = false;
  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
    result = do_marking_mt(asynch);
  } else {
    result = do_marking_st(asynch);
  }
  return result;
}

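// Dispatch note for markFromRootsWork() above (a reading aid, not new
// behavior): with CMSConcurrentMTEnabled and ParallelCMSThreads > 0 the
// marking is farmed out to the worker gang via do_marking_mt(); otherwise
// a single-threaded do_marking_st() walk is used. A false result from
// either propagates up through markFromRoots(), where it is treated as
// "bailed out because a foreground collection wants to take over".
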
// Forward decl
class CMSConcMarkingTask;

class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
  bool _yield;
 protected:
  virtual void yield();
 public:
  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  // "yield" indicates whether we need the gang as a whole to yield.
  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
                           CMSCollector* collector, bool yield) :
    ParallelTaskTerminator(n_threads, queue_set),
    _collector(collector),
    _yield(yield) { }

  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

// MT Concurrent Marking Task
class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  CMSCollector* _collector;
  YieldingFlexibleWorkGang* _workers;        // the whole gang
  int           _n_workers;                  // requested/desired # workers
  bool          _asynch;
  bool          _result;
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;
  HeapWord*     _global_finger;

  // Exposed here for yielding support
  Mutex* const _bit_map_lock;

  // The per thread work queues, available here for stealing
  OopTaskQueueSet*  _task_queues;
  CMSConcMarkingTerminator _term;

 public:
  CMSConcMarkingTask(CMSCollector* collector,
                     CompactibleFreeListSpace* cms_space,
                     CompactibleFreeListSpace* perm_space,
                     bool asynch, int n_workers,
                     YieldingFlexibleWorkGang* workers,
                     OopTaskQueueSet* task_queues):
    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
    _collector(collector),
    _cms_space(cms_space),
    _perm_space(perm_space),
    _asynch(asynch), _n_workers(n_workers), _result(true),
    _workers(workers), _task_queues(task_queues),
    _term(n_workers, task_queues, _collector, asynch),
    _bit_map_lock(collector->bitMapLock())
  {
    assert(n_workers <= workers->total_workers(),
           "Else termination won't work correctly today"); // XXX FIX ME!
    _requested_size = n_workers;
    _term.set_task(this);
    assert(_cms_space->bottom() < _perm_space->bottom(),
           "Finger incorrectly initialized below");
    _global_finger = _cms_space->bottom();
  }


  OopTaskQueueSet* task_queues()  { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  HeapWord** global_finger_addr() { return &_global_finger; }

  CMSConcMarkingTerminator* terminator() { return &_term; }

  void work(int i);

  virtual void coordinator_yield();  // stuff done by coordinator
  bool result() { return _result; }

  void reset(HeapWord* ra) {
    _term.reset_for_reuse();
  }

  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                           OopTaskQueue* work_q);

 private:
  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
  void do_work_steal(int i);
  void bump_global_finger(HeapWord* f);
};

void CMSConcMarkingTerminator::yield() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    _task->yield();
  } else {
    ParallelTaskTerminator::yield();
  }
}

////////////////////////////////////////////////////////////////
// Concurrent Marking Algorithm Sketch
////////////////////////////////////////////////////////////////
// Until all tasks exhausted (both spaces):
// -- claim next available chunk
// -- bump global finger via CAS
// -- find first object that starts in this chunk
//    and start scanning bitmap from that position
// -- scan marked objects for oops
// -- CAS-mark target, and if successful:
//    . if target oop is above global finger (volatile read)
//      nothing to do
//    . if target oop is in chunk and above local finger
//      then nothing to do
//    . else push on work-queue
// -- Deal with possible overflow issues:
//    . local work-queue overflow causes stuff to be pushed on
//      global (common) overflow queue
//    . always first empty local work queue
//    . then get a batch of oops from global work queue if any
//    . then do work stealing
// -- When all tasks claimed (both spaces)
//    and local work queue empty,
//    then in a loop do:
//    . check global overflow stack; steal a batch of oops and trace
//    . try to steal from other threads if GOS is empty
//    . if neither is available, offer termination
// -- Terminate and return result
//
void CMSConcMarkingTask::work(int i) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark hm;

  DEBUG_ONLY(_collector->verify_overflow_empty();)

  // Before we begin work, our work queue should be empty
  assert(work_queue(i)->size() == 0, "Expected to be empty");
  // Scan the bitmap covering _cms_space, tracing through grey objects.
  _timer.start();
  do_scan_and_mark(i, _cms_space);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  }

  // ... do the same for the _perm_space
  _timer.reset();
  _timer.start();
  do_scan_and_mark(i, _perm_space);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  }

  // ... do work stealing
  _timer.reset();
  _timer.start();
  do_work_steal(i);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  }
  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
  assert(work_queue(i)->size() == 0, "Should have been emptied");
  // Note that under the current task protocol, the
  // following assertion is true even if the spaces
  // expanded since the completion of the concurrent
  // marking. XXX This will likely change under a strict
  // ABORT semantics.
  assert(_global_finger >  _cms_space->end() &&
         _global_finger >= _perm_space->end(),
         "All tasks have been completed");
  DEBUG_ONLY(_collector->verify_overflow_empty();)
}

void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
  HeapWord* read = _global_finger;
  HeapWord* cur  = read;
  while (f > read) {
    cur = read;
    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
    if (cur == read) {
      // our cas succeeded
      assert(_global_finger >= f, "protocol consistency");
      break;
    }
  }
}

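// The loop above is the standard lossy-CAS "advance a monotone pointer"
// idiom: a worker only needs _global_finger to end up at least at f, so
// if the cmpxchg fails because another worker installed a larger value,
// the freshly read value is re-examined and the loop exits once
// read >= f. A sketch of one interleaving, with two workers A and B:
//
//   A reads _global_finger == 0x1000, wants f == 0x2000
//   B CASes _global_finger from 0x1000 to 0x3000 (B's chunk is later)
//   A's cmpxchg_ptr(0x2000, &_global_finger, 0x1000) fails, returns 0x3000
//   A sees 0x3000 >= 0x2000 and leaves the loop -- the finger is already
//   past A's chunk, which is all A requires.
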
// This is really inefficient, and should be redone by
// using (not yet available) block-read and -write interfaces to the
// stack and the work_queue. XXX FIX ME !!!
bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                                      OopTaskQueue* work_q) {
  // Fast lock-free check
  if (ovflw_stk->length() == 0) {
    return false;
  }
  assert(work_q->size() == 0, "Shouldn't steal");
  MutexLockerEx ml(ovflw_stk->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Grab up to 1/4 the size of the work queue
  size_t num = MIN2((size_t)work_q->max_elems()/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);
  num = MIN2(num, ovflw_stk->length());
  for (int i = (int) num; i > 0; i--) {
    oop cur = ovflw_stk->pop();
    assert(cur != NULL, "Counted wrong?");
    work_q->push(cur);
  }
  return num > 0;
}

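// Sizing note for get_work_from_overflow_stack() above: the transfer is
// capped at a quarter of the work queue's capacity (and at
// ParGCDesiredObjsFromOverflowList), plausibly so that a refilled queue
// still has room to absorb newly discovered grey objects and leaves work
// visible for other threads to steal. Illustratively, assuming
// (hypothetically) a queue with max_elems() == 16K, one refill moves at
// most min(4K, ParGCDesiredObjsFromOverflowList, ovflw_stk->length())
// oops.
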
void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  int n_tasks = pst->n_tasks();
  // We allow that there may be no tasks to do here because
  // we are restarting after a stack overflow.
  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
  int nth_task = 0;

  HeapWord* start = sp->bottom();
  size_t chunk_size = sp->marking_task_size();
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // Having claimed the nth task in this space,
    // compute the chunk that it corresponds to:
    MemRegion span = MemRegion(start + nth_task*chunk_size,
                               start + (nth_task+1)*chunk_size);
    // Try and bump the global finger via a CAS;
    // note that we need to do the global finger bump
    // _before_ taking the intersection below, because
    // the task corresponding to that region will be
    // deemed done even if the used_region() expands
    // because of allocation -- as it almost certainly will
    // during start-up while the threads yield in the
    // closure below.
    HeapWord* finger = span.end();
    bump_global_finger(finger);   // atomically
    // There are null tasks here corresponding to chunks
    // beyond the "top" address of the space.
    span = span.intersection(sp->used_region());
    if (!span.is_empty()) {  // Non-null task
      // We want to skip the first object because
      // the protocol is to scan any object in its entirety
      // that _starts_ in this span; a fortiori, any
      // object starting in an earlier span is scanned
      // as part of an earlier claimed task.
      // Below we use the "careful" version of block_start
      // so we do not try to navigate uninitialized objects.
      HeapWord* prev_obj = sp->block_start_careful(span.start());
      // Below we use a variant of block_size that uses the
      // Printezis bits to avoid waiting for allocated
      // objects to become initialized/parsable.
      while (prev_obj < span.start()) {
        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
        if (sz > 0) {
          prev_obj += sz;
        } else {
          // In this case we may end up doing a bit of redundant
          // scanning, but that appears unavoidable, short of
          // locking the free list locks; see bug 6324141.
          break;
        }
      }
      if (prev_obj < span.end()) {
        MemRegion my_span = MemRegion(prev_obj, span.end());
        // Do the marking work within a non-empty span --
        // the last argument to the constructor indicates whether the
        // iteration should be incremental with periodic yields.
        Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                    &_collector->_markBitMap,
                                    work_queue(i),
                                    &_collector->_markStack,
                                    &_collector->_revisitStack,
                                    _asynch);
        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
      } // else nothing to do for this task
    }   // else nothing to do for this task
  }
  // We'd be tempted to assert here that since there are no
  // more tasks left to claim in this space, the global_finger
  // must exceed space->top() and a fortiori space->end(). However,
  // that would not quite be correct because the bumping of
  // global_finger occurs strictly after the claiming of a task,
  // so by the time we reach here the global finger may not yet
  // have been bumped up by the thread that claimed the last
  // task.
  pst->all_tasks_completed();
}

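// To make the task geometry above concrete: task n of a space nominally
// covers the fixed-size chunk
//
//   [bottom + n*chunk_size, bottom + (n+1)*chunk_size)
//
// clipped to the space's used region. A sketch of that computation as a
// stand-alone helper (hypothetical, not used above):
//
//   static MemRegion marking_task_span(HeapWord* bottom, size_t chunk_size,
//                                      int n, MemRegion used) {
//     MemRegion span(bottom + n * chunk_size, bottom + (n + 1) * chunk_size);
//     return span.intersection(used);  // empty => null task beyond top()
//   }
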
class Par_ConcMarkingClosure: public OopClosure {
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSMarkStack* _overflow_stack;
  CMSMarkStack* _revisit_stack;    // XXXXXX Check proper use
  OopTaskQueue* _work_queue;

 public:
  Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    _collector(collector),
    _span(_collector->_span),
    _bit_map(bit_map),
    _overflow_stack(overflow_stack),
    _work_queue(work_queue) { }   // need to initialize revisit stack etc.

  void do_oop(oop* p);
  void trim_queue(size_t max);
  void handle_stack_overflow(HeapWord* lost);
};

// Grey object rescan during work stealing phase --
// the salient assumption here is that stolen oops must
// always be initialized, so we do not need to check for
// uninitialized objects before scanning here.
void Par_ConcMarkingClosure::do_oop(oop* p) {
  oop this_oop = *p;
  assert(this_oop->is_oop_or_null(),
         "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)this_oop;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {   // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow ||
          !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
        // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _overflow_stack->capacity());
        }
        // We cannot assert that the overflow stack is full because
        // it may have been emptied since.
        assert(simulate_overflow ||
               _work_queue->size() == _work_queue->max_elems(),
               "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    } // Else, some other thread got there first
  }
}

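// The push protocol in do_oop() above, in outline: a worker that wins
// the race to mark an object grey first tries its own bounded work
// queue, then falls back to the shared overflow stack; only when both
// fail do we concede a (benign) overflow and arrange to restart marking
// from a lower address. A sketch, using the fields above:
//
//   if (_bit_map->par_mark(addr)) {                  // white -> grey
//     if (!(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
//       handle_stack_overflow(addr);                 // forces a rescan
//     }
//   } // else some other thread already claimed the object
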
void Par_ConcMarkingClosure::trim_queue(size_t max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Should be an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
      assert(_span.contains((HeapWord*)new_oop), "Not in span");
      assert(new_oop->is_parsable(), "Should be parsable");
      new_oop->oop_iterate(this);  // do_oop() above
    }
  }
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the expansion below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();  // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible


void CMSConcMarkingTask::do_work_steal(int i) {
  OopTaskQueue* work_q = work_queue(i);
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  CMSMarkStack* ovflw = &(_collector->_markStack);
  int* seed = _collector->hash_seed(i);
  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
  while (true) {
    cl.trim_queue(0);
    assert(work_q->size() == 0, "Should have been emptied above");
    if (get_work_from_overflow_stack(ovflw, work_q)) {
      // Can't assert below because the work obtained from the
      // overflow stack may already have been stolen from us.
      // assert(work_q->size() > 0, "Work from overflow stack");
      continue;
    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      assert(obj_to_scan->is_oop(), "Should be an oop");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
      obj_to_scan->oop_iterate(&cl);
    } else if (terminator()->offer_termination()) {
      assert(work_q->size() == 0, "Impossible!");
      break;
    }
  }
}

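// The stealing loop above, in outline; each worker repeats these steps
// until global termination is reached:
//
//   1. drain its own queue completely (trim_queue(0));
//   2. try to refill the queue from the shared overflow stack;
//   3. failing that, try to steal from a randomly chosen victim queue;
//   4. failing that, offer termination -- the loop exits only when
//      every worker has offered termination, i.e. all queues and the
//      overflow stack are empty.
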
// This is run by the CMS (coordinator) thread.
void CMSConcMarkingTask::coordinator_yield() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");

  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bit_map_lock);
  _bit_map_lock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // It is possible for whichever thread initiated the yield request
  // not to get a chance to wake up and take the bitmap lock between
  // this thread releasing it and reacquiring it. So, while the
  // should_yield() flag is on, let's sleep for a bit to give the
  // other thread a chance to wake up. The limit imposed on the number
  // of iterations is defensive, to avoid any unforeseen circumstances
  // putting us into an infinite loop. Since it's always been this
  // (coordinator_yield()) method that was observed to cause the
  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  // which is by default non-zero. The other seven methods that also
  // perform the yield operation use a different parameter
  // (CMSYieldSleepCount) which is by default zero. This way we
  // can enable the sleeping for those methods too, if necessary.
  // See 6442774.
  //
  // We really need to reconsider the synchronization between the GC
  // thread and the yield-requesting threads in the future and we
  // should really use wait/notify, which is the recommended
  // way of doing this type of interaction. Additionally, we should
  // consolidate the eight methods that perform the yield operation,
  // which are almost identical, into one for better maintainability
  // and readability. See 6445193.
  //
  // Tony 2006.06.29
  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map_lock->lock_without_safepoint_check();
  _collector->startTimer();
}

bool CMSCollector::do_marking_mt(bool asynch) {
  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
  // In the future this would be determined ergonomically, based
  // on #cpu's, # active mutator threads (and load), and mutation rate.
  int num_workers = ParallelCMSThreads;

  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();

  CMSConcMarkingTask tsk(this, cms_space, perm_space,
                         asynch, num_workers /* number requested XXX */,
                         conc_workers(), task_queues());

  // Since the actual number of workers we get may be different
  // from the number we requested above, do we need to do anything different
  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
  // class?? XXX
  cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
  perm_space->initialize_sequential_subtasks_for_marking(num_workers);

  // Refs discovery is already non-atomic.
  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
  // Mutate the Refs discovery so it is MT during the
  // multi-threaded marking phase.
  ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);

  conc_workers()->start_task(&tsk);
  while (tsk.yielded()) {
    tsk.coordinator_yield();
    conc_workers()->continue_task(&tsk);
  }
  // If the task was aborted, _restart_addr will be non-NULL
  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
  while (_restart_addr != NULL) {
    // XXX For now we do not make use of ABORTED state and have not
    // yet implemented the right abort semantics (even in the original
    // single-threaded CMS case). That needs some more investigation
    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh marking iteration from the
    // indicated restart address.
    if (_foregroundGCIsActive && asynch) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
      // works from scratch. This avoids the headache of
      // a "rescan" which would otherwise be needed because
      // of the dirty mod union table & card table.
      _restart_addr = NULL;
      return false;
    }
    // Adjust the task to restart from _restart_addr
    tsk.reset(_restart_addr);
    cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
                                                           _restart_addr);
    perm_space->initialize_sequential_subtasks_for_marking(num_workers,
                                                           _restart_addr);
    _restart_addr = NULL;
    // Get the workers going again
    conc_workers()->start_task(&tsk);
    while (tsk.yielded()) {
      tsk.coordinator_yield();
      conc_workers()->continue_task(&tsk);
    }
  }
  assert(tsk.completed(), "Inconsistency");
  assert(tsk.result() == true, "Inconsistency");
  return true;
}

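// The coordination pattern used twice in do_marking_mt() above: the CMS
// thread starts the gang, and as long as the task reports that a worker
// yielded, the coordinator performs its own yield protocol and resumes
// the gang. In outline:
//
//   conc_workers()->start_task(&tsk);
//   while (tsk.yielded()) {
//     tsk.coordinator_yield();             // drop locks, sleep, re-lock
//     conc_workers()->continue_task(&tsk);
//   }
//
// Marking stack overflow is handled by re-initializing the subtasks to
// begin at _restart_addr and running the same loop again.
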
bool CMSCollector::do_marking_st(bool asynch) {
  ResourceMark rm;
  HandleMark   hm;

  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
    &_markStack, &_revisitStack, CMSYield && asynch);
  // the last argument to iterate indicates whether the iteration
  // should be incremental with periodic yields.
  _markBitMap.iterate(&markFromRootsClosure);
  // If _restart_addr is non-NULL, a marking stack overflow
  // occurred; we need to do a fresh iteration from the
  // indicated restart address.
  while (_restart_addr != NULL) {
    if (_foregroundGCIsActive && asynch) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
      // works from scratch. This avoids the headache of
      // a "rescan" which would otherwise be needed because
      // of the dirty mod union table & card table.
      _restart_addr = NULL;
      return false;  // indicating failure to complete marking
    }
    // Deal with stack overflow:
    // we restart marking from _restart_addr
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
  }
  return true;
}

void CMSCollector::preclean() {
  check_correct_thread_executing();
  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
  verify_work_stacks_empty();
  verify_overflow_empty();
  _abort_preclean = false;
  if (CMSPrecleaningEnabled) {
    _eden_chunk_index = 0;
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    // Don't start sampling unless we will get sufficiently
    // many samples.
    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
                * CMSScheduleRemarkEdenPenetration)) {
      _start_sampling = true;
    } else {
      _start_sampling = false;
    }
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  }
  CMSTokenSync x(true); // is cms thread
  if (CMSPrecleaningEnabled) {
    sample_eden();
    _collectorState = AbortablePreclean;
  } else {
    _collectorState = FinalMarking;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
}

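// A worked example of the sampling threshold in preclean() above,
// assuming (hypothetically) a 64M eden, CMSScheduleRemarkSamplingRatio
// == 5 and CMSScheduleRemarkEdenPenetration == 50 (percent):
//
//   threshold = capacity / (ratio * 100) * penetration
//             = 64M / 500 * 50  =  6.4M
//
// i.e. sampling starts only if eden is still below penetration/ratio
// percent (here 10%) full, which leaves room for roughly "ratio"
// samples before the target occupancy is reached.
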
// Try and schedule the remark such that young gen
// occupancy is CMSScheduleRemarkEdenPenetration %.
void CMSCollector::abortable_preclean() {
  check_correct_thread_executing();
  assert(CMSPrecleaningEnabled, "Inconsistent control state");
  assert(_collectorState == AbortablePreclean, "Inconsistent control state");

  // If Eden's current occupancy is below this threshold,
  // immediately schedule the remark; else preclean
  // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  // we will never do an actual abortable preclean cycle.
  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
    // We need more smarts in the abortable preclean
    // loop below to deal with cases where allocation
    // in young gen is very very slow, and our precleaning
    // is running a losing race against a horde of
    // mutators intent on flooding us with CMS updates
    // (dirty cards).
    // One, admittedly dumb, strategy is to give up
    // after a certain number of abortable precleaning loops
    // or after a certain maximum time. We want to make
    // this smarter in the next iteration.
    // XXX FIX ME!!! YSR
    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
    while (!(should_abort_preclean() ||
             ConcurrentMarkSweepThread::should_terminate())) {
      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
      cumworkdone += workdone;
      loops++;
      // Voluntarily terminate abortable preclean phase if we have
      // been at it for too long.
      if ((CMSMaxAbortablePrecleanLoops != 0) &&
          loops >= CMSMaxAbortablePrecleanLoops) {
        if (PrintGCDetails) {
          gclog_or_tty->print(" CMS: abort preclean due to loops ");
        }
        break;
      }
      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
        if (PrintGCDetails) {
          gclog_or_tty->print(" CMS: abort preclean due to time ");
        }
        break;
      }
      // If we are doing little work each iteration, we should
      // take a short break.
      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
        // Sleep for some time, waiting for work to accumulate
        stopTimer();
        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
        startTimer();
        waited++;
      }
    }
    if (PrintCMSStatistics > 0) {
      gclog_or_tty->print(" ["SIZE_FORMAT" iterations, "SIZE_FORMAT
                          " waits, "SIZE_FORMAT" cards] ",
                          loops, waited, cumworkdone);
    }
  }
  CMSTokenSync x(true); // is cms thread
  if (_collectorState != Idling) {
    assert(_collectorState == AbortablePreclean,
           "Spontaneous state transition?");
    _collectorState = FinalMarking;
  } // Else, a foreground collection completed this CMS cycle.
  return;
}

// Respond to an Eden sampling opportunity
void CMSCollector::sample_eden() {
  // Make sure a young gc cannot sneak in between our
  // reading and recording of a sample.
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "Only the cms thread may collect Eden samples");
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Should collect samples while holding CMS token");
  if (!_start_sampling) {
    return;
  }
  if (_eden_chunk_array) {
    if (_eden_chunk_index < _eden_chunk_capacity) {
      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
             "Unexpected state of Eden");
      // We'd like to check that what we just sampled is an oop-start address;
      // however, we cannot do that here since the object may not yet have been
      // initialized. So we'll instead do the check when we _use_ this sample
      // later.
      if (_eden_chunk_index == 0 ||
          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
                         _eden_chunk_array[_eden_chunk_index-1])
           >= CMSSamplingGrain)) {
        _eden_chunk_index++;  // commit sample
      }
    }
  }
  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    assert(used <= capacity, "Unexpected state of Eden");
    if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
      _abort_preclean = true;
    }
  }
}


size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  assert(_collectorState == Precleaning ||
         _collectorState == AbortablePreclean, "incorrect state");
  ResourceMark rm;
  HandleMark   hm;
  // Do one pass of scrubbing the discovered reference lists
  // to remove any reference objects with strongly-reachable
  // referents.
  if (clean_refs) {
    ReferenceProcessor* rp = ref_processor();
    CMSPrecleanRefsYieldClosure yield_cl(this);
    assert(rp->span().equals(_span), "Spans should be equal");
    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                   &_markStack);
    CMSDrainMarkingStackClosure complete_trace(this,
                                  _span, &_markBitMap, &_markStack,
                                  &keep_alive);

    // We don't want this step to interfere with a young
    // collection because we don't want to take CPU
    // or memory bandwidth away from the young GC threads
    // (which may be as many as there are CPUs).
    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // manipulate the discovered reference lists nor affect
    // the computed reachability of the referents, the
    // only properties manipulated by the precleaning
    // of these reference lists.
    stopTimer();
    CMSTokenSyncWithLocks x(true /* is cms thread */,
                            bitMapLock());
    startTimer();
    sample_eden();
    // The following will yield to allow foreground
    // collection to proceed promptly. XXX YSR:
    // The code in this method may need further
    // tweaking for better performance and some restructuring
    // for cleaner interfaces.
    rp->preclean_discovered_references(
          rp->is_alive_non_header(), &keep_alive, &complete_trace,
          &yield_cl);
  }

  if (clean_survivor) {  // preclean the active survivor space(s)
    assert(_young_gen->kind() == Generation::DefNew ||
           _young_gen->kind() == Generation::ParNew ||
           _young_gen->kind() == Generation::ASParNew,
           "incorrect type for cast");
    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, &_revisitStack,
                              true /* precleaning phase */);
    stopTimer();
    CMSTokenSyncWithLocks ts(true /* is cms thread */,
                             bitMapLock());
    startTimer();
    unsigned int before_count =
      GenCollectedHeap::heap()->total_collections();
    SurvivorSpacePrecleanClosure
      sss_cl(this, _span, &_markBitMap, &_markStack,
             &pam_cl, before_count, CMSYield);
    dng->from()->object_iterate_careful(&sss_cl);
    dng->to()->object_iterate_careful(&sss_cl);
  }
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
             &_markStack, &_revisitStack, this, CMSYield,
             true /* precleaning phase */);
  // CAUTION: The following closure has persistent state that may need to
  // be reset upon a decrease in the sequence of addresses it
  // processes.
  ScanMarkedObjectsAgainCarefullyClosure
    smoac_cl(this, _span,
             &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);

  // Preclean dirty cards in ModUnionTable and CardTable using
  // appropriate convergence criterion;
  // repeat CMSPrecleanIter times unless we find that
  // we are losing.
  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
         "Bad convergence multiplier");
  assert(CMSPrecleanThreshold >= 100,
         "Unreasonably low CMSPrecleanThreshold");

  size_t numIter, cumNumCards, lastNumCards, curNumCards;
  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
       numIter < CMSPrecleanIter;
       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
    curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
    if (CMSPermGenPrecleaningEnabled) {
      curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
    }
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
    }
    // Either there are very few dirty cards, so re-mark
    // pause will be small anyway, or our pre-cleaning isn't
    // that much faster than the rate at which cards are being
    // dirtied, so we might as well stop and re-mark since
    // precleaning won't improve our re-mark time by much.
    if (curNumCards <= CMSPrecleanThreshold ||
        (numIter > 0 &&
         (curNumCards * CMSPrecleanDenominator >
          lastNumCards * CMSPrecleanNumerator))) {
      numIter++;
      cumNumCards += curNumCards;
      break;
    }
  }
  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  if (CMSPermGenPrecleaningEnabled) {
    curNumCards += preclean_card_table(_permGen, &smoac_cl);
  }
  cumNumCards += curNumCards;
  if (PrintGCDetails && PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "
                           SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
                           curNumCards, cumNumCards, numIter);
  }
  return cumNumCards;   // as a measure of useful work done
}

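// A worked example of the convergence test in the loop above, assuming
// (hypothetically) CMSPrecleanNumerator == 2, CMSPrecleanDenominator == 3
// and CMSPrecleanThreshold == 1000: iteration stops early when
//
//   curNumCards <= 1000                      // few dirty cards remain, or
//   curNumCards * 3 > lastNumCards * 2       // i.e. this pass shrank the
//                                            // dirty set by less than 1/3
//
// so precleaning keeps iterating only while each pass reduces the dirty
// card population by at least a third.
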
// PRECLEANING NOTES:
// Precleaning involves:
// . reading the bits of the modUnionTable and clearing the set bits.
// . For the cards corresponding to the set bits, we scan the
// objects on those cards. This means we need the free_list_lock
// so that we can safely iterate over the CMS space when scanning
// for oops.
// . When we scan the objects, we'll be both reading and setting
// marks in the marking bit map, so we'll need the marking bit map.
// . For protecting _collector_state transitions, we take the CGC_lock.
// Note that any races in the reading of card table entries by the
// CMS thread on the one hand and the clearing of those entries by the
// VM thread or the setting of those entries by the mutator threads on the
// other are quite benign. However, for efficiency it makes sense to keep
// the VM thread from racing with the CMS thread while the latter is
// transferring dirty card info to the modUnionTable. We therefore also use the
// CGC_lock to protect the reading of the card table and the mod union
// table by the CMS thread.
// . We run concurrently with mutator updates, so scanning
// needs to be done carefully -- we should not try to scan
// potentially uninitialized objects.
//
// Locking strategy: While holding the CGC_lock, we scan over and
// reset a maximal dirty range of the mod union / card tables, then lock
// the free_list_lock and bitmap lock to do a full marking, then
// release these locks; and repeat the cycle. This allows for a
// certain amount of fairness in the sharing of these locks between
// the CMS collector on the one hand, and the VM thread and the
// mutators on the other.

// NOTE: preclean_mod_union_table() and preclean_card_table()
// further below are largely identical; if you need to modify
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_mod_union_table(
  ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  verify_work_stacks_empty();
  verify_overflow_empty();

  // strategy: starting with the first card, accumulate contiguous
  // ranges of dirty cards; clear these cards, then scan the region
  // covered by these cards.

  // Since all of the MUT is committed ahead, we can just use
  // that, in case the generations expand while we are precleaning.
  // It might also be fine to just use the committed part of the
  // generation, but we might potentially miss cards when the
  // generation is rapidly expanding while we are in the midst
  // of precleaning.
  HeapWord* startAddr = gen->reserved().start();
  HeapWord* endAddr   = gen->reserved().end();

  cl->setFreelistLock(gen->freelistLock());   // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *nextAddr, *lastAddr;
  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark   hm;

    MemRegion dirtyRegion;
    {
      stopTimer();
      CMSTokenSync ts(true);
      startTimer();
      sample_eden();
      // Get dirty region starting at nextOffset (inclusive),
      // simultaneously clearing it.
      dirtyRegion =
        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    // Remember where the next search should begin.
    // The returned region (if non-empty) is a right open interval,
    // so lastOffset is obtained from the right end of that
    // interval.
    lastAddr = dirtyRegion.end();
    // Should do something more transparent and less hacky XXX
    numDirtyCards =
      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());

    // We'll scan the cards in the dirty region (with periodic
    // yields for foreground GC as needed).
    if (!dirtyRegion.is_empty()) {
      assert(numDirtyCards > 0, "consistency check");
      HeapWord* stop_point = NULL;
      {
        stopTimer();
        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                 bitMapLock());
        startTimer();
        verify_work_stacks_empty();
        verify_overflow_empty();
        sample_eden();
        stop_point =
          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      }
      if (stop_point != NULL) {
        // The careful iteration stopped early either because it found an
        // uninitialized object, or because we were in the midst of an
        // "abortable preclean", which should now be aborted. Redirty
        // the bits corresponding to the partially-scanned or unscanned
        // cards. We'll either restart at the next block boundary or
        // abort the preclean.
        assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");

        stopTimer();
        CMSTokenSyncWithLocks ts(true, bitMapLock());
        startTimer();
        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up;
          // might need bitMapLock in order to read P-bits.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
    } else {
      assert(lastAddr == endAddr, "consistency check");
      assert(numDirtyCards == 0, "consistency check");
      break;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

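// A loop invariant worth noting in preclean_mod_union_table() above: the
// region returned by getAndClearMarkedRegion() is right-open, so setting
// lastAddr (and hence the next nextAddr) to dirtyRegion.end() resumes
// the search exactly at the first address not yet examined; an empty
// returned region therefore implies the remainder of the space is clean
// and the loop can exit.
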
// NOTE: preclean_mod_union_table() above and preclean_card_table()
// below are largely identical; if you need to modify
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table() above, in that
  // we accumulate contiguous ranges of dirty cards, mark these cards
  // precleaned, then scan the region covered by these cards.
  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());

  cl->setFreelistLock(gen->freelistLock());   // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *lastAddr, *nextAddr;

  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark   hm;

    MemRegion dirtyRegion;
    {
      // See comments in "Precleaning notes" above on why we
      // do this locking. XXX Could the locking overheads be
      // too high when dirty cards are sparse? [I don't think so.]
      stopTimer();
      CMSTokenSync x(true); // is cms thread
      startTimer();
      sample_eden();
      // Get and clear dirty region from card table
      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
                      MemRegion(nextAddr, endAddr));
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    lastAddr = dirtyRegion.end();
    numDirtyCards =
      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;

    if (!dirtyRegion.is_empty()) {
      stopTimer();
      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
      startTimer();
      sample_eden();
      verify_work_stacks_empty();
      verify_overflow_empty();
      HeapWord* stop_point =
        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      if (stop_point != NULL) {
        // The careful iteration stopped early because it found an
        // uninitialized object. Redirty the bits corresponding to the
        // partially-scanned or unscanned cards, and start again at the
        // next block boundary.
        assert(CMSPermGenPrecleaningEnabled ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");
        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
    } else {
      break;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

void CMSCollector::checkpointRootsFinal(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  assert(_collectorState == FinalMarking, "incorrect state transition?");
  check_correct_thread_executing();
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  verify_work_stacks_empty();
  verify_overflow_empty();

  SpecializationStats::clear();
  if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                        _young_gen->used() / K,
                        _young_gen->capacity() / K);
  }
  if (asynch) {
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
        PrintGCDetails && Verbose, true, gclog_or_tty);)
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,  // full (i.e. force, see below)
                           false, // !clear_all_soft_refs
                           0,     // size
                           false, // is_tlab
                           level  // max_level
                          );
      }
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    assert(!init_mark_was_synchronous, "but that's impossible!");
    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  } else {
    // already have all the locks
    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                             init_mark_was_synchronous);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  SpecializationStats::print();
}

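// A note on the FlagSetting guard used above: it is a scoped
// save/restore -- _is_gc_active is forced to false for the duration of
// the nested do_collection() call (which expects the flag off on entry
// and manages it itself), and the previous value is restored when the
// guard goes out of scope. In outline:
//
//   { FlagSetting fl(gch->_is_gc_active, false);
//     gch->do_collection(...);        // runs with the flag cleared
//   }                                 // destructor restores old value
//
// The scavenge itself is bounded by max_level == _cmsGen->level() - 1,
// i.e. only the generations younger than CMS are collected.
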
void CMSCollector::checkpointRootsFinalWork(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {

  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_begin();
  }

  ResourceMark rm;
  HandleMark   hm;

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  if (cms_should_unload_classes()) {
    CodeCache::gc_prologue();
  }
  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (!init_mark_was_synchronous) {
    // We might assume that we need not fill TLAB's when
    // CMSScavengeBeforeRemark is set, because we may have just done
    // a scavenge which would have filled all TLAB's -- and besides
    // Eden would be empty. This however may not always be the case --
    // for instance although we asked for a scavenge, it may not have
    // happened because of a JNI critical section. We probably need
    // a policy for deciding whether we can in that case wait until
    // the critical section releases and then do the remark following
    // the scavenge, and skip it here. In the absence of that policy,
    // or of an indication of whether the scavenge did indeed occur,
    // we cannot rely on TLAB's having been filled and must do
    // so here just in case a scavenge did not happen.
    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    // Update the saved marks which may affect the root scans.
    gch->save_marks();

    {
      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

      // Note on the role of the mod union table:
      // Since the marker in "markFromRoots" marks concurrently with
      // mutators, it is possible for some reachable objects not to have been
      // scanned. For instance, an only reference to an object A was
      // placed in object B after the marker scanned B. Unless B is rescanned,
      // A would be collected. Such updates to references in marked objects
      // are detected via the mod union table which is the set of all cards
      // dirtied since the first checkpoint in this GC cycle and prior to
      // the most recent young generation GC, minus those cleaned up by the
      // concurrent precleaning.
      if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
        do_remark_parallel();
      } else {
        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
                    gclog_or_tty);
        do_remark_non_parallel();
      }
    }
  } else {
    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
    // The initial mark was stop-world, so there's no rescanning to
    // do; go straight on to the next step below.
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  {
    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
    refProcessingWork(asynch, clear_all_soft_refs);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (cms_should_unload_classes()) {
    CodeCache::gc_epilogue();
  }

  // If we encountered any (marking stack / work queue) overflow
  // events during the current CMS cycle, take appropriate
  // remedial measures, where possible, so as to try and avoid
  // recurrence of that condition.
  assert(_markStack.isEmpty(), "No grey objects");
  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                     _ser_kac_ovflw;
  if (ser_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
        _ser_kac_ovflw);
    }
    _markStack.expand();
    _ser_pmc_remark_ovflw = 0;
    _ser_pmc_preclean_ovflw = 0;
    _ser_kac_ovflw = 0;
  }
  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Work queue overflow (benign) "
        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _par_pmc_remark_ovflw, _par_kac_ovflw);
    }
    _par_pmc_remark_ovflw = 0;
    _par_kac_ovflw = 0;
  }
  if (PrintCMSStatistics != 0) {
    if (_markStack._hit_limit > 0) {
      gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
                             _markStack._hit_limit);
    }
    if (_markStack._failed_double > 0) {
      gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
                             " current capacity "SIZE_FORMAT,
                             _markStack._failed_double,
                             _markStack.capacity());
    }
  }
  _markStack._hit_limit = 0;
  _markStack._failed_double = 0;

  if ((VerifyAfterGC || VerifyDuringGC) &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    verify_after_remark();
  }

  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
         " final marking");
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  }
}

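// Remediation summary for the overflow bookkeeping above: serial-phase
// overflows are remedied by growing _markStack for subsequent cycles
// (expand()), while the parallel-phase counters are merely reset --
// the fixed-size per-worker queues cannot be grown, and relief comes
// from spilling to the shared overflow mechanism instead.
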
// Parallel remark task
class CMSParRemarkTask: public AbstractGangTask {
  CMSCollector* _collector;
  WorkGang*     _workers;
  int           _n_workers;
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;

  // The per-thread work queues, available here for stealing.
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;

 public:
  CMSParRemarkTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
                   CompactibleFreeListSpace* perm_space,
                   int n_workers, WorkGang* workers,
                   OopTaskQueueSet* task_queues):
    AbstractGangTask("Rescan roots and grey objects in parallel"),
    _collector(collector),
    _cms_space(cms_space), _perm_space(perm_space),
    _n_workers(n_workers),
    _workers(workers),
    _task_queues(task_queues),
    _term(workers->total_workers(), task_queues) { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }

  void work(int i);

 private:
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);

  // ... of dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
                                  Par_MarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
};

void CMSParRemarkTask::work(int i) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark   hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(i), &(_collector->_revisitStack));

  // Rescan young gen roots first since these are likely
  // coarsely partitioned and may, on that account, constitute
  // the critical path; thus, it's best to start off that
  // work first.
  // ---------- young gen roots --------------
  {
    DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
    EdenSpace* eden_space = dng->eden();
    ContiguousSpace* from_space = dng->from();
    ContiguousSpace* to_space   = dng->to();

    HeapWord** eca = _collector->_eden_chunk_array;
    size_t     ect = _collector->_eden_chunk_index;
    HeapWord** sca = _collector->_survivor_chunk_array;
    size_t     sct = _collector->_survivor_chunk_index;

    assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
    assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");

    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);

    _timer.stop();
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(
        "Finished young gen rescan work in %dth thread: %3.3f sec",
        i, _timer.seconds());
    }
  }

  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                false, // yg was scanned above
                                true,  // collecting perm gen
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                NULL, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished remaining root rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();

  // Do the rescan tasks for each of the two spaces
  // (cms_space and perm_space) in turn.
  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished dirty card rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- steal work from other threads ...
  // ---------- ... and drain overflow list.
  _timer.reset();
  _timer.start();
  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished work stealing in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }
}

void
CMSParRemarkTask::do_young_space_rescan(int i,
  Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
  HeapWord** chunk_array, size_t chunk_top) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  //   using chunk_array
  // . par_oop_iterate(cl) over that region

  ResourceMark rm;
  HandleMark   hm;

  SequentialSubTasksDone* pst = space->par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");

  int nth_task = 0;
  int n_tasks  = pst->n_tasks();

  HeapWord *start, *end;
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // We claimed task # nth_task; compute its boundaries.
    if (chunk_top == 0) {  // no samples were taken
      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
      start = space->bottom();
      end   = space->top();
    } else if (nth_task == 0) {
      start = space->bottom();
      end   = chunk_array[nth_task];
    } else if (nth_task < (jint)chunk_top) {
      assert(nth_task >= 1, "Control point invariant");
      start = chunk_array[nth_task - 1];
      end   = chunk_array[nth_task];
    } else {
      assert(nth_task == (jint)chunk_top, "Control point invariant");
      start = chunk_array[chunk_top - 1];
      end   = space->top();
    }
    MemRegion mr(start, end);
    // Verify that mr is in space
    assert(mr.is_empty() || space->used_region().contains(mr),
           "Should be in space");
    // Verify that "start" is an object boundary
    assert(mr.is_empty() || oop(mr.start())->is_oop(),
           "Should be an oop");
    space->par_oop_iterate(mr, cl);
  }
  pst->all_tasks_completed();
}

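// Boundary selection in do_young_space_rescan() above, summarized: with
// k == chunk_top sampled boundaries b[0..k-1], task 0 rescans
// [bottom, b[0]), task j (0 < j < k) rescans [b[j-1], b[j]), and task k
// rescans [b[k-1], top); with no samples (chunk_top == 0) there is a
// single task covering [bottom, top).
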
void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
  Par_MarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  //   . claim an unclaimed task
  //   . compute region boundaries corresponding to task claimed
  //   . transfer dirty bits ct->mut for that region
  //   . apply rescan closure to dirty mut bits for that region

  ResourceMark rm;
  HandleMark   hm;

  OopTaskQueue* work_q = work_queue(i);
  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  // CAUTION: This closure has state that persists across calls to
  // the work method dirty_range_iterate_clear() in that it has
  // embedded in it a (subtype of) UpwardsObjectClosure. The
  // use of that state in the embedded UpwardsObjectClosure instance
  // assumes that the cards are always iterated (even if in parallel
  // by several threads) in monotonically increasing order per each
  // thread. This is true of the implementation below, which picks
  // card ranges (chunks) in monotonically increasing order globally
  // and, a fortiori, in monotonically increasing order per thread
  // (the latter order being a subsequence of the former).
  // If the work code below is ever reorganized into a more chaotic
  // work-partitioning form than the current "sequential tasks"
  // paradigm, the use of that persistent state will have to be
  // revisited and modified appropriately. See also related
  // bug 4756801, work on which should examine this code to make
  // sure that the changes there do not run counter to the
  // assumptions made here and necessary for correctness and
  // efficiency. Note also that this code might yield inefficient
  // behaviour in the case of very large objects that span one or
  // more work chunks. Such objects would potentially be scanned
  // several times redundantly. Work on 4756801 should try and
  // address that performance anomaly if at all possible. XXX
  MemRegion  full_span  = _collector->_span;
  CMSBitMap* bm         = &(_collector->_markBitMap);     // shared
  CMSMarkStack* rs      = &(_collector->_revisitStack);   // shared
  MarkFromDirtyCardsClosure
    greyRescanClosure(_collector, full_span, // entire span of interest
                      sp, bm, work_q, rs, cl);

  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");
  int nth_task = 0;
  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  MemRegion span = sp->used_region();
  HeapWord* start_addr = span.start();
  HeapWord* end_addr   = (HeapWord*)round_to((intptr_t)span.end(),
                                             alignment);
  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
         start_addr, "Check alignment");
  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
         chunk_size, "Check alignment");
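  // (An illustrative sizing, with hypothetical numbers not taken from the
  //  source: with 512-byte cards and 64-bit mod-union-table words, one MUT
  //  word covers 64 cards, i.e. 32KB of heap; aligning chunk boundaries to
  //  card_size * BitsPerWord therefore guarantees that no two workers ever
  //  read-modify-write the same MUT word, which is why the loop below
  //  needs no locking.)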

  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // Having claimed the nth_task, compute corresponding mem-region,
    // which is a fortiori aligned correctly (i.e. at a MUT boundary).
    // The alignment restriction ensures that we do not need any
    // synchronization with other gang-workers while setting or
    // clearing bits in this chunk of the MUT.
    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
                                    start_addr + (nth_task+1)*chunk_size);
    // The last chunk's end might be way beyond end of the
    // used region. In that case pull back appropriately.
    if (this_span.end() > end_addr) {
      this_span.set_end(end_addr);
      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
    }
    // Iterate over the dirty cards covering this chunk, marking them
    // precleaned, and setting the corresponding bits in the mod union
    // table. Since we have been careful to partition at Card and MUT-word
    // boundaries no synchronization is needed between parallel threads.
    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
                                                 &modUnionClosure);

    // Having transferred these marks into the modUnionTable,
    // rescan the marked objects on the dirty cards in the modUnionTable.
    // Even if this is at a synchronous collection, the initial marking
    // may have been done during an asynchronous collection so there
    // may be dirty bits in the mod-union table.
    _collector->_modUnionTable.dirty_range_iterate_clear(
                  this_span, &greyRescanClosure);
    _collector->_modUnionTable.verifyNoOneBitsInRange(
                                 this_span.start(),
                                 this_span.end());
  }
  pst->all_tasks_completed();  // declare that i am done
}

// . see if we can share work_queues with ParNew? XXX
void
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
                                int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  size_t num_from_overflow_list =
           MIN2((size_t)work_q->max_elems()/4,
                (size_t)ParGCDesiredObjsFromOverflowList);
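  // (Note: the MIN2 above caps each refill at a quarter of the local
  //  queue's capacity, further bounded by ParGCDesiredObjsFromOverflowList,
  //  so a single worker cannot drain the shared overflow list while the
  //  other workers starve.)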

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    cl->trim_queue(0);
    // Now check if there's any work in the overflow list
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q)) {
      // found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't since all of that could have
      // been already stolen from us.
      // "He giveth and He taketh away."
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(cl);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
         "Else our work is not yet done");
}

// Return a thread-local PLAB recording array, as appropriate.
void* CMSCollector::get_data_recorder(int thr_num) {
  if (_survivor_plab_array != NULL &&
      (CMSPLABRecordAlways ||
       (_collectorState > Marking && _collectorState < FinalMarking))) {
    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
    ChunkArray* ca = &_survivor_plab_array[thr_num];
    ca->reset();   // clear it so that fresh data is recorded
    return (void*) ca;
  } else {
    return NULL;
  }
}

// Reset all the thread-local PLAB recording arrays
void CMSCollector::reset_survivor_plab_arrays() {
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _survivor_plab_array[i].reset();
  }
}

// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS rescan.
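// (In effect this is a k-way merge, k = ParallelGCThreads: each round
//  inspects the current cursor of every per-thread array, takes the
//  globally smallest unconsumed address, and advances that thread's
//  cursor, so _survivor_chunk_array comes out in ascending address
//  order -- which the ASSERT block at the end double-checks.)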
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
  assert(_survivor_plab_array  != NULL, "Error");
  assert(_survivor_chunk_array != NULL, "Error");
  assert(_collectorState == FinalMarking, "Error");
  for (uint j = 0; j < ParallelGCThreads; j++) {
    _cursor[j] = 0;
  }
  HeapWord* top = surv->top();
  size_t i;
  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
    HeapWord* min_val = top;  // Higher than any PLAB address
    uint      min_tid = 0;    // position of min_val this round
    for (uint j = 0; j < ParallelGCThreads; j++) {
      ChunkArray* cur_sca = &_survivor_plab_array[j];
      if (_cursor[j] == cur_sca->end()) {
        continue;
      }
      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
      assert(surv->used_region().contains(cur_val), "Out of bounds value");
      if (cur_val < min_val) {
        min_tid = j;
        min_val = cur_val;
      } else {
        assert(cur_val < top, "All recorded addresses should be less");
      }
    }
    // At this point min_val and min_tid are respectively
    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
    // and the thread (j) that witnesses that address.
    // We record this address in the _survivor_chunk_array[i]
    // and increment _cursor[min_tid] prior to the next round i.
    if (min_val == top) {
      break;
    }
    _survivor_chunk_array[i] = min_val;
    _cursor[min_tid]++;
  }
  // We are all done; record the size of the _survivor_chunk_array
  _survivor_chunk_index = i; // exclusive: [0, i)
  if (PrintCMSStatistics > 0) {
    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
  }
  // Verify that we used up all the recorded entries
#ifdef ASSERT
  size_t total = 0;
  for (uint j = 0; j < ParallelGCThreads; j++) {
    assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
    total += _cursor[j];
  }
  assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
  // Check that the merged array is in sorted order
  if (total > 0) {
    for (size_t i = 0; i < total - 1; i++) {
      if (PrintCMSStatistics > 0) {
        gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
                            i, _survivor_chunk_array[i]);
      }
      assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
             "Not sorted");
    }
  }
#endif // ASSERT
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan of young gen.
// See ParRescanTask where this is currently used.
void
CMSCollector::
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
  assert(n_threads > 0, "Unexpected n_threads argument");
  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;

  // Eden space
  {
    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    // Each valid entry in [0, _eden_chunk_index) represents a task.
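    // (The "+ 1" below accounts for the final region between the last
    //  sampled boundary and space->top(); see the task-boundary
    //  computation in do_young_space_rescan() above.)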
    size_t n_tasks = _eden_chunk_index + 1;
    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
  }

  // Merge the survivor plab arrays into _survivor_chunk_array
  if (_survivor_plab_array != NULL) {
    merge_survivor_plab_arrays(dng->from());
  } else {
    assert(_survivor_chunk_index == 0, "Error");
  }

  // To space
  {
    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks(1);
    assert(pst->valid(), "Error");
  }

  // From space
  {
    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    size_t n_tasks = _survivor_chunk_index + 1;
    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
    assert(pst->valid(), "Error");
  }
}

// Parallel version of remark
void CMSCollector::do_remark_parallel() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = workers->total_workers();
  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();

  CMSParRemarkTask tsk(this,
    cms_space, perm_space,
    n_workers, workers, task_queues());

  // Set up for parallel process_strong_roots work.
  gch->set_par_threads(n_workers);
  gch->change_strong_roots_parity();
  // We won't be iterating over the cards in the card table updating
  // the younger_gen cards, so we shouldn't call the following else
  // the verification code as well as subsequent younger_refs_iterate
  // code would get confused. XXX
  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel

  // The young gen rescan work will not be done as part of
  // process_strong_roots (which currently doesn't know how to
  // parallelize such a scan), but rather will be broken up into
  // a set of parallel tasks (via the sampling that the [abortable]
  // preclean phase did of EdenSpace, plus the [two] tasks of
  // scanning the [two] survivor spaces). Further fine-grain
  // parallelization of the scanning of the survivor spaces
  // themselves, and of precleaning of the younger gen itself,
  // is deferred to the future.
  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);

  // The dirty card rescan work is broken up into a "sequence"
  // of parallel tasks (per constituent space) that are dynamically
  // claimed by the parallel threads.
  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
  perm_space->initialize_sequential_subtasks_for_rescan(n_workers);

  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    // Make refs discovery MT-safe
    ReferenceProcessorMTMutator mt(ref_processor(), true);
    workers->run_task(&tsk);
  } else {
    tsk.work(0);
  }
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  // restore, single-threaded for now, any preserved marks
  // as a result of work_q overflow
  restore_preserved_marks_if_any();
}

// Non-parallel version of remark
void CMSCollector::do_remark_non_parallel() {
  ResourceMark rm;
  HandleMark   hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
             &_markStack, &_revisitStack, this,
             false /* should_yield */, false /* not precleaning */);
  MarkFromDirtyCardsClosure
    markFromDirtyCardsClosure(this, _span,
                              NULL,  // space is set further below
                              &_markBitMap, &_markStack, &_revisitStack,
                              &mrias_cl);
  {
    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
    // Iterate over the dirty cards, marking them precleaned, and
    // setting the corresponding bits in the mod union table.
    {
      ModUnionClosure modUnionClosure(&_modUnionTable);
      _ct->ct_bs()->dirty_card_iterate(
                      _cmsGen->used_region(),
                      &modUnionClosure);
      _ct->ct_bs()->dirty_card_iterate(
                      _permGen->used_region(),
                      &modUnionClosure);
    }
    // Having transferred these marks into the modUnionTable, we just need
    // to rescan the marked objects on the dirty cards in the modUnionTable.
    // The initial marking may have been done during an asynchronous
    // collection so there may be dirty bits in the mod-union table.
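    // (Pipeline recap: card-table "dirty" entry -> mod union table bit ->
    //  rescan of the object(s) that bit covers; this mirrors, in a single
    //  thread, what do_dirty_card_rescan_tasks() above does per chunk.)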
    const int alignment =
      CardTableModRefBS::card_size * BitsPerWord;
    {
      // ... First handle dirty cards in CMS gen
      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
      MemRegion ur = _cmsGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion cms_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(cms_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
          markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
    {
      // .. and then repeat for dirty cards in perm gen
      markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
      MemRegion ur = _permGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion perm_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(perm_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
          markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  }
  if (VerifyDuringGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(true);
  }
  {
    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);

    verify_work_stacks_empty();

    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,  // younger gens as roots
                                  true,  // collecting perm gen
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  NULL, &mrias_cl);
  }
  verify_work_stacks_empty();
  // Restore evacuated mark words, if any, used for overflow list links
  if (!CMSOverflowEarlyRestoration) {
    restore_preserved_marks_if_any();
  }
  verify_overflow_empty();
}

////////////////////////////////////////////////////////
// Parallel Reference Processing Task Proxy Class
////////////////////////////////////////////////////////
class CMSRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  CMSCollector*          _collector;
  CMSBitMap*             _mark_bit_map;
  MemRegion              _span;
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;
  ProcessTask&           _task;

public:
  CMSRefProcTaskProxy(ProcessTask&     task,
                      CMSCollector*    collector,
                      const MemRegion& span,
                      CMSBitMap*       mark_bit_map,
                      int              total_workers,
                      OopTaskQueueSet* task_queues):
    AbstractGangTask("Process referents by policy in parallel"),
    _task(task),
    _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
    _task_queues(task_queues),
    _term(total_workers, task_queues)
  { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }

  void do_work_steal(int i,
                     CMSParDrainMarkingStackClosure* drain,
                     CMSParKeepAliveClosure* keep_alive,
                     int* seed);

  virtual void work(int i);
};

void CMSRefProcTaskProxy::work(int i) {
  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                        _mark_bit_map, work_queue(i));
  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
                                                 _mark_bit_map, work_queue(i));
  CMSIsAliveClosure is_alive_closure(_mark_bit_map);
  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
  if (_task.marks_oops_alive()) {
    do_work_steal(i, &par_drain_stack, &par_keep_alive,
                  _collector->hash_seed(i));
  }
  assert(work_queue(i)->size() == 0, "work_queue should be empty");
  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}

class CMSRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  CMSRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("Enqueue reference objects in parallel"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};

CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _mark_and_push(collector, span, bit_map, work_queue),
   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }
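// (Sketch, an illustrative reading rather than a spec: _low_water_mark
//  bounds how full the work queue may get before trimming kicks in;
//  e.g., assuming a hypothetical 16K-entry queue, the first MIN2 term
//  caps it at 4K entries. Numbers illustrative only.)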

// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
  CMSParDrainMarkingStackClosure* drain,
  CMSParKeepAliveClosure* keep_alive,
  int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  size_t num_from_overflow_list =
           MIN2((size_t)work_q->max_elems()/4,
                (size_t)ParGCDesiredObjsFromOverflowList);

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    drain->trim_queue(0);
    // Now check if there's any work in the overflow list
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q)) {
      // Found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't, since all of that might have
      // been already stolen from us.
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(keep_alive);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
}

void CMSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = workers->total_workers();
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor()->span(),
                              _collector.markBitMap(),
                              n_workers, _collector.task_queues());
  workers->run_task(&rp_task);
}

void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark   hm;
  ReferencePolicy* soft_ref_policy;

  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  if (clear_all_soft_refs) {
    soft_ref_policy = new AlwaysClearPolicy();
  } else {
#ifdef COMPILER2
    soft_ref_policy = new LRUMaxHeapPolicy();
#else
    soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2
  }
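  // (Policy sketch, stated as an assumption rather than from this file:
  //  AlwaysClearPolicy unconditionally clears discovered soft references,
  //  while the LRU policies keep a soft reference alive unless it has gone
  //  unused for a duration scaled by free heap -- roughly
  //  SoftRefLRUPolicyMSPerMB per free megabyte, sized against the maximum
  //  or current heap respectively.)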
  verify_work_stacks_empty();

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                          &_markStack);
  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                _span, &_markBitMap, &_markStack,
                                &cmsKeepAliveClosure);
  {
    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
    if (rp->processing_is_mt()) {
      CMSRefProcTaskExecutor task_executor(*this);
      rp->process_discovered_references(soft_ref_policy,
                                        &_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        &task_executor);
    } else {
      rp->process_discovered_references(soft_ref_policy,
                                        &_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        NULL);
    }
    verify_work_stacks_empty();
  }

  if (cms_should_unload_classes()) {
    {
      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);

      // Follow SystemDictionary roots and unload classes
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Follow CodeCache roots and unload any methods marked for unloading
      CodeCache::do_unloading(&_is_alive_closure,
                              &cmsKeepAliveClosure,
                              purged_class);

      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();

      // Update subklass/sibling/implementor links in KlassKlass descendants
      assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
      oop k;
      while ((k = _revisitStack.pop()) != NULL) {
        ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
                                &_is_alive_closure,
                                &cmsKeepAliveClosure);
      }
      assert(!ClassUnloading ||
             (_markStack.isEmpty() && overflow_list_is_empty()),
             "Should not have found new reachable objects");
      assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();
    }

    {
      TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
      // Now clean up stale oops in SymbolTable and StringTable
      SymbolTable::unlink(&_is_alive_closure);
      StringTable::unlink(&_is_alive_closure);
    }
  }

  verify_work_stacks_empty();
  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    CMSRefProcTaskExecutor task_executor(*this);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");

  // JVMTI object tagging is based on JNI weak refs. If any of these
  // refs were cleared then JVMTI needs to update its maps and
  // maybe post ObjectFrees to agents.
  JvmtiExport::cms_ref_processing_epilogue();
}

#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
  Thread* t = Thread::current();
  // Only the VM thread or the CMS thread should be here.
  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
         "Unexpected thread type");
  // If this is the vm thread, the foreground process
  // should not be waiting.  Note that _foregroundGCIsActive is
  // true while the foreground collector is waiting.
  if (_foregroundGCShouldWait) {
    // We cannot be the VM thread
    assert(t->is_ConcurrentGC_thread(),
           "Should be CMS thread");
  } else {
    // We can be the CMS thread only if we are in a stop-world
    // phase of CMS collection.
    if (t->is_ConcurrentGC_thread()) {
      assert(_collectorState == InitialMarking ||
             _collectorState == FinalMarking,
             "Should be a stop-world phase");
      // The CMS thread should be holding the CMS_token.
      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
             "Potential interference with concurrently "
             "executing VM thread");
    }
  }
}
#endif

void CMSCollector::sweep(bool asynch) {
  assert(_collectorState == Sweeping, "just checking");
  check_correct_thread_executing();
  verify_work_stacks_empty();
  verify_overflow_empty();
  incrementSweepCount();
  _sweep_timer.stop();
  _sweep_estimate.sample(_sweep_timer.seconds());
  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());

  // PermGen verification support: If perm gen sweeping is disabled in
  // this cycle, we preserve the perm gen object "deadness" information
  // in the perm_gen_verify_bit_map. In order to do that we traverse
  // all blocks in perm gen and mark all dead objects.
  if (verifying() && !cms_should_unload_classes()) {
    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                             bitMapLock());
    assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
           "Should have already been allocated");
    MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
                               markBitMap(), perm_gen_verify_bit_map());
    _permGen->cmsSpace()->blk_iterate(&mdo);
  }

  if (asynch) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
    // First sweep the old gen then the perm gen
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               bitMapLock());
      sweepWork(_cmsGen, asynch);
    }

    // Now repeat for perm gen
    if (cms_should_unload_classes()) {
      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                               bitMapLock());
      sweepWork(_permGen, asynch);
    }

    // Update Universe::_heap_*_at_gc figures.
    // We need all the free list locks to make the abstract state
    // transition from Sweeping to Resizing. See detailed note
    // further below.
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               _permGen->freelistLock());
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
      _collectorState = Resizing;
    }
  } else {
    // already have needed locks
    sweepWork(_cmsGen, asynch);

    if (cms_should_unload_classes()) {
      sweepWork(_permGen, asynch);
    }
    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();
    _collectorState = Resizing;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  _sweep_timer.reset();
  _sweep_timer.start();

  update_time_of_last_gc(os::javaTimeMillis());

  // NOTE on abstract state transitions:
  // Mutators allocate-live and/or mark the mod-union table dirty
  // based on the state of the collection.  The former is done in
  // the interval [Marking, Sweeping] and the latter in the interval
  // [Marking, Sweeping).  Thus the transitions into the Marking state
  // and out of the Sweeping state must be synchronously visible
  // globally to the mutators.
  // The transition into the Marking state happens with the world
  // stopped so the mutators will globally see it.  Sweeping is
  // done asynchronously by the background collector so the transition
  // from the Sweeping state to the Resizing state must be done
  // under the freelistLock (as is the check for whether to
  // allocate-live and whether to dirty the mod-union table).
  assert(_collectorState == Resizing, "Change of collector state to"
    " Resizing must be done under the freelistLocks (plural)");

  // Now that sweeping has been completed, if the GCH's
  // incremental_collection_will_fail flag is set, clear it,
  // thus inviting a younger gen collection to promote into
  // this generation. If such a promotion may still fail,
  // the flag will be set again when a young collection is
  // attempted.
  // I think the incremental_collection_will_fail flag's use
  // is specific to a 2 generation collection policy, so I'll
  // assert that that's the configuration we are operating within.
  // The use of the flag can and should be generalized appropriately
  // in the future to deal with a general n-generation system.

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "Resetting of incremental_collection_will_fail flag"
         " may be incorrect otherwise");
  gch->clear_incremental_collection_will_fail();
  gch->update_full_collections_completed(_collection_count_start);
}

// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  double nearLargestPercent = 0.999;
  HeapWord* minAddr = _cmsSpace->bottom();
  HeapWord* largestAddr =
    (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
  if (largestAddr == 0) {
    // The dictionary appears to be empty.  In this case
    // try to coalesce at the end of the heap.
    largestAddr = _cmsSpace->end();
  }
  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
  size_t nearLargestOffset =
    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}
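// (Worked example with illustrative numbers: if bottom is B and the
//  largest free chunk starts 1000MB above it, the marker lands at about
//  B + 999MB minus MinChunkSize, i.e. just short of that chunk;
//  isNearLargestChunk() below then answers true only for addresses in
//  that top sliver of the space.)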

bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  return addr >= _cmsSpace->nearLargestChunk();
}

FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  return _cmsSpace->find_chunk_at_end();
}

void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
                                                    bool full) {
  // The next lower level has been collected.  Gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Gather statistics on the young generation collection.
    collector()->stats().record_gc0_end(used());
  }
}

CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  }
  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  _debug_collection_type =
    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  }
}

void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
  bool asynch) {
  // We iterate over the space(s) underlying this generation,
  // checking the mark bit map to see if the bits corresponding
  // to specific blocks are marked or not. Blocks that are
  // marked are live and are not swept up. All remaining blocks
  // are swept up, with coalescing on-the-fly as we sweep up
  // contiguous free and/or garbage blocks:
  // We need to ensure that the sweeper synchronizes with allocators
  // and stop-the-world collectors. In particular, the following
  // locks are used:
  // . CMS token: if this is held, a stop the world collection cannot occur
  // . freelistLock: if this is held no allocation can occur from this
  //   generation by another thread
  // . bitMapLock: if this is held, no other thread can access or update
  //   the mark bit map

  // Note that we need to hold the freelistLock if we use
  // block iterate below; else the iterator might go awry if
  // a mutator (or promotion) causes block contents to change
  // (for instance if the allocator divvies up a block).
  // If we hold the free list lock, for all practical purposes
  // young generation GC's can't occur (they'll usually need to
  // promote), so we might as well prevent all young generation
  // GC's while we do a sweeping step. For the same reason, we might
  // as well take the bit map lock for the entire duration.

  // check that we hold the requisite locks
  assert(have_cms_token(), "Should hold cms token");
  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
         "Should possess CMS token to sweep");
  assert_lock_strong(gen->freelistLock());
  assert_lock_strong(bitMapLock());

  assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
  gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
                                      _sweep_estimate.padded_average());
  gen->setNearLargestChunk();

  {
    SweepClosure sweepClosure(this, gen, &_markBitMap,
                              CMSYield && asynch);
    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
    // We need to free-up/coalesce garbage/blocks from a
    // co-terminal free run. This is done in the SweepClosure
    // destructor; so, do not remove this scope, else the
    // end-of-sweep-census below will be off by a little bit.
  }
  gen->cmsSpace()->sweep_completed();
  gen->cmsSpace()->endSweepFLCensus(sweepCount());
}

// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* sp = size_policy();
  AdaptiveSizePolicyOutput(sp, gch->total_collections());
  if (asynch) {
    CMSTokenSyncWithLocks ts(true, bitMapLock());

    // If the state is not "Resetting", the foreground thread
    // has done a collection and the resetting.
    if (_collectorState != Resetting) {
      assert(_collectorState == Idling, "The state should only change"
        " because the foreground collector has finished the collection");
      return;
    }

    // Clear the mark bitmap (no grey objects to start with)
    // for the next cycle.
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);

    HeapWord* curAddr = _markBitMap.startWord();
    while (curAddr < _markBitMap.endWord()) {
      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
      _markBitMap.clear_large_range(chunk);
      if (ConcurrentMarkSweepThread::should_yield() &&
          !foregroundGCIsActive() &&
          CMSYield) {
        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
               "CMS thread should hold CMS token");
        assert_lock_strong(bitMapLock());
        bitMapLock()->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        ConcurrentMarkSweepThread::acknowledge_yield_request();
        stopTimer();
        if (PrintCMSStatistics != 0) {
          incrementYields();
        }
        icms_wait();
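        // (Yield-protocol sketch: the bit-map lock and CMS token have
        //  just been given up so the VM thread can run; after a brief
        //  sleep loop below, both are re-acquired before the bit map is
        //  touched again -- the same dance as coordinator_yield().)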

        // See the comment in coordinator_yield()
        for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
          os::sleep(Thread::current(), 1, false);
          ConcurrentMarkSweepThread::acknowledge_yield_request();
        }

        ConcurrentMarkSweepThread::synchronize(true);
        bitMapLock()->lock_without_safepoint_check();
        startTimer();
      }
      curAddr = chunk.end();
    }
    _collectorState = Idling;
  } else {
    // already have the lock
    assert(_collectorState == Resetting, "just checking");
    assert_lock_strong(bitMapLock());
    _markBitMap.clear_all();
    _collectorState = Idling;
  }

  // Stop incremental mode after a cycle completes, so that any future cycles
  // are triggered by allocation.
  stop_icms();

  NOT_PRODUCT(
    if (RotateCMSCollectionTypes) {
      _cmsGen->rotate_debug_collection_type();
    }
  )
}

void CMSCollector::do_CMS_operation(CMS_op_type op) {
  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
  TraceCollectorStats tcs(counters());

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      checkpointRootsInitial(true);       // asynch
      if (PrintGC) {
        _cmsGen->printOccupancy("initial-mark");
      }
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      checkpointRootsFinal(true,    // asynch
                           false,   // !clear_all_soft_refs
                           false);  // !init_mark_was_synchronous
      if (PrintGC) {
        _cmsGen->printOccupancy("remark");
      }
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}

#ifndef PRODUCT
size_t const CMSCollector::skip_header_HeapWords() {
  return FreeChunk::header_size();
}

// Try and collect here conditions that should hold when
// CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
void CMSCollector::verify_ok_to_terminate() const {
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "should be called by CMS thread");
  assert(!_foregroundGCShouldWait, "should be false");
  // We could check here that all the various low-level locks
  // are not held by the CMS thread, but that is overkill; see
  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  // is checked.
}
#endif

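// (Printezis-mark sketch, inferred from the asserts below: when a block's
//  header is not yet parsable, a second mark bit is set at addr + 1 and a
//  third on the block's last word; the size can then be recovered from the
//  bit map alone, which is why size >= 3 must hold.)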
size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  size_t size = pointer_delta(nextOneAddr + 1, addr);
  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
         "alignment problem");
  assert(size >= 3, "Necessary for Printezis marks to work");
  return size;
}

// A variant of the above (block_size_using_printezis_bits()) except
// that we return 0 if the P-bits are not yet set.
size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  if (_markBitMap.isMarked(addr)) {
    assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
    size_t size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");
    assert(size >= 3, "Necessary for Printezis marks to work");
    return size;
  } else {
    assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
    return 0;
  }
}

HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  size_t sz = 0;
  oop p = (oop)addr;
  if (p->klass() != NULL && p->is_parsable()) {
    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  } else {
    sz = block_size_using_printezis_bits(addr);
  }
  assert(sz > 0, "size must be nonzero");
  HeapWord* next_block = addr + sz;
  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
                                             CardTableModRefBS::card_size);
  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
         "must be different cards");
  return next_card;
}


// CMS Bit Map Wrapper /////////////////////////////////////////

// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate() further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  _bm(NULL,0),
  _shifter(shifter),
  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
{
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

bool CMSBitMap::allocate(MemRegion mr) {
  _bmStartWord = mr.start();
  _bmWordSize  = mr.word_size();
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("CMS bit map allocation failure");
    return false;
  }
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("CMS bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of CMS bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);

  // bm.clear(); // can we rely on getting zero'd memory? verify below
  assert(isAllClear(),
         "Expected zero'd memory from ReservedSpace constructor");
  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
         "consistency check");
  return true;
}
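// (Sizing sketch: the map keeps one bit per (1 << _shifter) heap words,
//  so the backing store above needs _bmWordSize >> (_shifter +
//  LogBitsPerByte) bytes, plus one byte to cover rounding.)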

void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  HeapWord *next_addr, *end_addr, *last_addr;
  assert_locked();
  assert(covers(mr), "out-of-range error");
  // XXX assert that start and end are appropriately aligned
  for (next_addr = mr.start(), end_addr = mr.end();
       next_addr < end_addr; next_addr = last_addr) {
    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
    last_addr = dirty_region.end();
    if (!dirty_region.is_empty()) {
      cl->do_MemRegion(dirty_region);
    } else {
      assert(last_addr == end_addr, "program logic");
      return;
    }
  }
}

#ifndef PRODUCT
void CMSBitMap::assert_locked() const {
  CMSLockVerifier::assert_locked(lock());
}

bool CMSBitMap::covers(MemRegion mr) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
         "size inconsistency");
  return (mr.start() >= _bmStartWord) &&
         (mr.end()   <= endWord());
}

bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  return (start >= _bmStartWord && (start + size) <= endWord());
}

void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  // verify that there are no 1 bits in the interval [left, right)
  FalseBitMapClosure falseBitMapClosure;
  iterate(&falseBitMapClosure, left, right);
}

void CMSBitMap::region_invariant(MemRegion mr)
{
  assert_locked();
  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert(covers(mr), "mr should be covered by bit map");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  // Make sure that end() is appropriately aligned
  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
                        (1 << (_shifter+LogHeapWordSize))),
         "Misaligned mr.end()");
  size_t end_ofs = heapWordToOffset(mr.end());
  assert(end_ofs > start_ofs, "Should mark at least one bit");
}

#endif

bool CMSMarkStack::allocate(size_t size) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                   size * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("CMSMarkStack allocation failure");
    return false;
  }
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("CMSMarkStack backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "didn't reserve backing store for all of CMS stack?");
  _base = (oop*)(_virtual_space.low());
  _index = 0;
  _capacity = size;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
void CMSMarkStack::expand() {
  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
  if (_capacity == CMSMarkStackSizeMax) {
    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
      // We print a warning message only once per CMS cycle.
      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
    }
    return;
  }
  // Double capacity if possible
  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                   new_capacity * sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
    // Failed to double capacity, continue;
    // we print a detail message only once per CMS cycle.
    gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
                        SIZE_FORMAT"K",
                        _capacity / K, new_capacity / K);
  }
}
|
|
6277 |
|
|
6278 |
|
|
6279 |
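// Illustration of the growth policy above, with made-up numbers: if
// _capacity is 1M entries and CMSMarkStackSizeMax were (say) 4M, expand()
// tries to reserve a fresh 2M-entry backing store *before* releasing the
// old one only in the sense that a failed reservation leaves the original
// stack untouched and usable. Note that a successful expansion empties the
// stack (_index = 0); callers such as handle_stack_overflow() rely on
// having already discarded the contents via reset().
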
// Closures
// XXX: there seems to be a lot of code duplication here;
// should refactor and consolidate common code.

// This closure is used to mark refs into the CMS generation in
// the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not an oldest (modulo perm gen)
// generation then this will lose younger_gen cards!

MarkRefsIntoClosure::MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
    _span(span),
    _bitMap(bitMap),
    _should_do_nmethods(should_do_nmethods)
{
  assert(_ref_processor == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void MarkRefsIntoClosure::do_oop(oop* p) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  oop thisOop = *p;
  if (thisOop != NULL) {
    assert(thisOop->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)thisOop;
    if (_span.contains(addr)) {
      // this should be made more efficient
      _bitMap->mark(addr);
    }
  }
}

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  bool should_do_nmethods):
    _span(span),
    _verification_bm(verification_bm),
    _cms_bm(cms_bm),
    _should_do_nmethods(should_do_nmethods) {
  assert(_ref_processor == NULL, "deliberately left NULL");
  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}

void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  oop this_oop = *p;
  if (this_oop != NULL) {
    assert(this_oop->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)this_oop;
    if (_span.contains(addr)) {
      _verification_bm->mark(addr);
      if (!_cms_bm->isMarked(addr)) {
        oop(addr)->print();
        gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
        fatal("... aborting");
      }
    }
  }
}

//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////

MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
                                                       ReferenceProcessor* rp,
                                                       CMSBitMap* bit_map,
                                                       CMSBitMap* mod_union_table,
                                                       CMSMarkStack* mark_stack,
                                                       CMSMarkStack* revisit_stack,
                                                       CMSCollector* collector,
                                                       bool should_yield,
                                                       bool concurrent_precleaning):
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
                      mark_stack, revisit_stack, concurrent_precleaning),
  _yield(should_yield),
  _concurrent_precleaning(concurrent_precleaning),
  _freelistLock(NULL)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
  oop this_oop = *p;
  if (this_oop != NULL) {
    assert(this_oop->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)this_oop;
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(), "should be empty");
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object is now grey)
      _bit_map->mark(addr);
      // push on marking stack (stack should be empty), and drain the
      // stack by applying this closure to the oops in the oops popped
      // from the stack (i.e. blacken the grey objects)
      bool res = _mark_stack->push(this_oop);
      assert(res, "Should have space to push on empty stack");
      do {
        oop new_oop = _mark_stack->pop();
        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
        assert(new_oop->is_parsable(), "Found unparsable oop");
        assert(_bit_map->isMarked((HeapWord*)new_oop),
               "only grey objects on this stack");
        // iterate over the oops in this oop, marking and pushing
        // the ones in CMS heap (i.e. in _span).
        new_oop->oop_iterate(&_pushAndMarkClosure);
        // check if it's time to yield
        do_yield_check();
      } while (!_mark_stack->isEmpty() ||
               (!_concurrent_precleaning && take_from_overflow_list()));
      // if marking stack is empty, and we are not doing this
      // during precleaning, then check the overflow list
    }
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list was drained above");
    // We could restore evacuated mark words, if any, used for
    // overflow list links here because the overflow list is
    // provably empty here. That would reduce the maximum
    // size requirements for preserved_{oop,mark}_stack.
    // But we'll just postpone it until we are all done
    // so we can just stream through.
    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
      _collector->restore_preserved_marks_if_any();
      assert(_collector->no_preserved_marks(), "No preserved marks");
    }
    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
           "All preserved marks should have been restored above");
  }
}

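// Descriptive note (not from the original sources): the do/while above
// implements "eager drainage" -- each root oop is fully blackened, together
// with everything newly reachable from it, before do_oop() returns. That is
// what entitles the closure to assert an empty mark stack both on entry and
// on exit, and (outside precleaning) an empty overflow list as well.
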
void MarkRefsIntoAndScanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bit_map->lock());
  // relinquish the free_list_lock and bitMapLock()
  _bit_map->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

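// Descriptive note (not from the original sources): every do_yield_work()
// variant in this file follows the same shape -- drop the locks in the
// reverse of acquisition order, desynchronize so a waiting VM thread can
// take the CMS token, sleep in short slices while a yield is still being
// requested, then resynchronize and retake the locks without a safepoint
// check.
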
///////////////////////////////////////////////////////////
// Par_MarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
    _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
                            revisit_stack)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

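// Descriptive note (not from the original sources): _low_water_mark picks
// the smaller of a quarter of the queue's capacity and a threshold scaled
// by the number of GC threads, so trim_queue() keeps each local queue short
// enough to leave work available for stealing without draining it to empty
// on every push.
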
// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
  oop this_oop = *p;
  if (this_oop != NULL) {
    // Ignore mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
    HeapWord* addr = (HeapWord*)this_oop;
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object will become grey):
      // It is possible for several threads to be
      // trying to "claim" this object concurrently;
      // the unique thread that succeeds in marking the
      // object first will do the subsequent push on
      // to the work queue (or overflow list).
      if (_bit_map->par_mark(addr)) {
        // push on work_queue (which may not be empty), and trim the
        // queue to an appropriate length by applying this closure to
        // the oops in the oops popped from the stack (i.e. blacken the
        // grey objects)
        bool res = _work_queue->push(this_oop);
        assert(res, "Low water mark should be less than capacity?");
        trim_queue(_low_water_mark);
      } // Else, another thread claimed the object
    }
  }
}

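// Descriptive note (not from the original sources): par_mark() is the
// claiming step here. Exactly one thread sees it succeed for a given
// address, so each newly grey object is pushed (and later scanned) exactly
// once even though several threads may discover it at the same time.
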
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {

  size_t size = 0;
  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(_span.contains(addr), "we are scanning the CMS generation");
  // check if it's time to yield
  if (do_yield_check()) {
    // We yielded for some foreground stop-world work,
    // and we have been asked to abort this ongoing preclean cycle.
    return 0;
  }
  if (_bitMap->isMarked(addr)) {
    // it's marked; is it potentially uninitialized?
    if (p->klass() != NULL) {
      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
        // Signal precleaning to redirty the card since
        // the klass pointer is already installed.
        assert(size == 0, "Initial value");
      } else {
        assert(p->is_parsable(), "must be parsable.");
        // an initialized object; ignore mark word in verification below
        // since we are running concurrent with mutators
        assert(p->is_oop(true), "should be an oop");
        if (p->is_objArray()) {
          // objArrays are precisely marked; restrict scanning
          // to dirty cards only.
          size = p->oop_iterate(_scanningClosure, mr);
          assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
                 "adjustObjectSize should be the identity for array sizes, "
                 "which are necessarily larger than minimum object size of "
                 "two heap words");
        } else {
          // A non-array may have been imprecisely marked; we need
          // to scan object in its entirety.
          size = CompactibleFreeListSpace::adjustObjectSize(
                   p->oop_iterate(_scanningClosure));
        }
#ifdef DEBUG
        size_t direct_size =
          CompactibleFreeListSpace::adjustObjectSize(p->size());
        assert(size == direct_size, "Inconsistency in size");
        assert(size >= 3, "Necessary for Printezis marks to work");
        if (!_bitMap->isMarked(addr+1)) {
          _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
        } else {
          _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
          assert(_bitMap->isMarked(addr+size-1),
                 "inconsistent Printezis mark");
        }
#endif // DEBUG
      }
    } else {
      // an uninitialized object
      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
      size = pointer_delta(nextOneAddr + 1, addr);
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
             "alignment problem");
      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
      // will dirty the card when the klass pointer is installed in the
      // object (signalling the completion of initialization).
    }
  } else {
    // Either a not yet marked object or an uninitialized object
    if (p->klass() == NULL || !p->is_parsable()) {
      // An uninitialized object, skip to the next card, since
      // we may not be able to read its P-bits yet.
      assert(size == 0, "Initial value");
    } else {
      // An object not (yet) reached by marking: we merely need to
      // compute its size so as to go look at the next block.
      assert(p->is_oop(true), "should be an oop");
      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
    }
  }
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  return size;
}

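// A worked example of the Printezis-mark convention used above (a sketch
// reconstructed from the asserts, not quoted from a specification): a
// block that has been allocated but not yet initialized has bits set at
// addr and addr+1, and a third bit at addr+size-1 records where it ends.
// Starting from addr, the next set bit at or after addr+2 -- nextOneAddr
// above -- therefore yields size = pointer_delta(nextOneAddr + 1, addr).
// This is why "size >= 3" is asserted: an object of fewer than three words
// could not carry all three marks at distinct positions.
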
void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bitMap->lock());
  // relinquish the free_list_lock and bitMapLock()
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}


//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {

  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(!_span.contains(addr), "we are scanning the survivor spaces");
  assert(p->klass() != NULL, "object should be initialized");
  assert(p->is_parsable(), "must be parsable.");
  // an initialized object; ignore mark word in verification below
  // since we are running concurrent with mutators
  assert(p->is_oop(true), "should be an oop");
  // Note that we do not yield while we iterate over
  // the interior oops of p, pushing the relevant ones
  // on our marking stack.
  size_t size = p->oop_iterate(_scanning_closure);
  do_yield_check();
  // Observe that below, we do not abandon the preclean
  // phase as soon as we should; rather we empty the
  // marking stack before returning. This is to satisfy
  // some existing assertions. In general, it may be a
  // good idea to abort immediately and complete the marking
  // from the grey objects at a later time.
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
    assert(new_oop->is_parsable(), "Found unparsable oop");
    assert(_bit_map->isMarked((HeapWord*)new_oop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS heap (i.e. in _span).
    new_oop->oop_iterate(_scanning_closure);
    // check if it's time to yield
    do_yield_check();
  }
  unsigned int after_count =
    GenCollectedHeap::heap()->total_collections();
  bool abort = (_before_count != after_count) ||
               _collector->should_abort_preclean();
  return abort ? 0 : size;
}

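// Descriptive note (not from the original sources): comparing _before_count
// against the heap's total_collections() detects whether any collection
// completed while this survivor-space object was being precleaned --
// presumably because a scavenge can relocate survivors. If one did, the
// closure returns 0 so the caller abandons the current preclean pass.
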
void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), "expected an oop or null");
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
#ifdef DEBUG
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // DEBUG
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
#ifdef DEBUG
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // DEBUG
  return is_obj_array;
}

MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* bitMap, CMSMarkStack* markStack,
                        CMSMarkStack* revisitStack,
                        bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _revisitStack(revisitStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  DEBUG_ONLY(_verifying = verifying;)
}

void MarkFromRootsClosure::reset(HeapWord* addr) {
  assert(_markStack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
  _threshold = (HeapWord*)round_to(
                 (intptr_t)_finger, CardTableModRefBS::card_size);
}

// Should revisit to see if this should be restructured for
// greater efficiency.
void MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skipBits > 0) {
    _skipBits--;
    return;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass() == NULL || !p->is_parsable()) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* start_card_addr = (HeapWord*)round_down(
                                        (intptr_t)addr, CardTableModRefBS::card_size);
          HeapWord* end_card_addr   = (HeapWord*)round_to(
                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return;
    }
  }
  scanOopsInOop(addr);
}

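// Descriptive note (not from the original sources): setting _skipBits to 2
// makes the next two do_bit() callbacks return immediately, so the two
// auxiliary Printezis bits that follow an uninitialized object's start bit
// are never misread as the start bits of further objects.
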
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bitMap->lock());
  _bitMap->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop this_oop = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(this_oop->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + this_oop->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(old_threshold == (HeapWord*)round_to(
               (intptr_t)old_threshold, CardTableModRefBS::card_size),
             "_threshold should always be card-aligned");
      _threshold = (HeapWord*)round_to(
                     (intptr_t)_finger, CardTableModRefBS::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      // XXX When _finger crosses from old gen into perm gen
      // we may be doing unnecessary cleaning; do better in the
      // future by detecting that condition and clearing fewer
      // MUT/CT entries.
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})

  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _revisitStack,
                                      _finger, this);
  bool res = _markStack->push(this_oop);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}

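// Descriptive note (not from the original sources): the "finger" is the
// classic incremental-marking cursor. Everything below _finger has already
// been visited by the bit-map iteration, so a newly greyed object below the
// finger must go on the mark stack, while objects at or beyond it will be
// reached anyway when the iteration gets there (see PushOrMarkClosure).
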
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* overflow_stack,
                        CMSMarkStack* revisit_stack,
                        bool should_yield):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _revisit_stack(revisit_stack),
  _yield(should_yield),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;  // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
void Par_MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass() == NULL || !p->is_parsable()) {
      // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
      return;
    }
  }
  scan_oops_in_oop(addr);
}

void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop this_oop = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(this_oop->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + this_oop->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  // The clean-on-enter optimization is disabled by default,
  // until we fix 6178663.
  if (CMSCleanOnEnter && (_finger > _threshold)) {
    // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
    // The set of cards to be cleared is all those that overlap
    // with the interval [_threshold, _finger); note that
    // _threshold is always kept card-aligned but _finger isn't
    // always card-aligned.
    HeapWord* old_threshold = _threshold;
    assert(old_threshold == (HeapWord*)round_to(
             (intptr_t)old_threshold, CardTableModRefBS::card_size),
           "_threshold should always be card-aligned");
    _threshold = (HeapWord*)round_to(
                   (intptr_t)_finger, CardTableModRefBS::card_size);
    MemRegion mr(old_threshold, _threshold);
    assert(!mr.is_empty(), "Control point invariant");
    assert(_span.contains(mr), "Should clear within span");  // _whole_span ??
    // XXX When _finger crosses from old gen into perm gen
    // we may be doing unnecessary cleaning; do better in the
    // future by detecting that condition and clearing fewer
    // MUT/CT entries.
    _mut->clear_range(mr);
  }

  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
  HeapWord** gfa = _task->global_finger_addr();
  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
                                          _span, _bit_map,
                                          _work_queue,
                                          _overflow_stack,
                                          _revisit_stack,
                                          _finger,
                                          gfa, this);
  bool res = _work_queue->push(this_oop);  // overflow could occur here
  assert(res, "Will hold once we use workqueues");
  while (true) {
    oop new_oop;
    if (!_work_queue->pop_local(new_oop)) {
      // We emptied our work_queue; check if there's stuff that can
      // be gotten from the overflow stack.
      if (CMSConcMarkingTask::get_work_from_overflow_stack(
            _overflow_stack, _work_queue)) {
        do_yield_check();
        continue;
      } else {  // done
        break;
      }
    }
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}

// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
                        CMSMarkStack* mark_stack):
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack),
  _pam_verify_closure(collector, span, verification_bm, cms_bm,
                      mark_stack)
{
  assert(_mark_stack->isEmpty(), "stack should be empty");
  _finger = _verification_bm->startWord();
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
}

// Should revisit to see if this should be restructured for
// greater efficiency.
void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  // convert offset into a HeapWord*
  HeapWord* addr = _verification_bm->startWord() + offset;
  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
         "address out of range");
  assert(_verification_bm->isMarked(addr), "tautology");
  assert(_cms_bm->isMarked(addr), "tautology");

  assert(_mark_stack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert addr to an oop preparatory to scanning
  oop this_oop = oop(addr);
  assert(this_oop->is_oop(), "should be an oop");
  assert(_finger <= addr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = addr + this_oop->size();
  assert(_finger > addr, "we just incremented it above");
  // Note: the finger doesn't advance while we drain
  // the stack below.
  bool res = _mark_stack->push(this_oop);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&_pam_verify_closure);
  }
  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
}

PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  CMSCollector* collector, MemRegion span,
  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  CMSMarkStack* mark_stack):
    OopClosure(collector->ref_processor()),
    _collector(collector),
    _span(span),
    _verification_bm(verification_bm),
    _cms_bm(cms_bm),
    _mark_stack(mark_stack)
{ }


// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset();   // discard stack contents
  _mark_stack->expand();  // expand the stack if possible
}

void PushAndMarkVerifyClosure::do_oop(oop* p) {
  oop this_oop = *p;
  assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)this_oop;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);  // now grey
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
      fatal("... aborting");
    }

    if (!_mark_stack->push(this_oop)) {  // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _mark_stack->capacity());
      }
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
  }
}

PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bitMap, CMSMarkStack* markStack,
                     CMSMarkStack* revisitStack,
                     HeapWord* finger, MarkFromRootsClosure* parent) :
  OopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _markStack(markStack),
  _revisitStack(revisitStack),
  _finger(finger),
  _parent(parent),
  _should_remember_klasses(collector->cms_should_unload_classes())
{ }

Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack* overflow_stack,
                     CMSMarkStack* revisit_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
                     Par_MarkFromRootsClosure* parent) :
  OopClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _revisit_stack(revisit_stack),
  _finger(finger),
  _global_finger_addr(global_finger_addr),
  _parent(parent),
  _should_remember_klasses(collector->cms_should_unload_classes())
{ }


void CMSCollector::lower_restart_addr(HeapWord* low) {
  assert(_span.contains(low), "Out of bounds addr");
  if (_restart_addr == NULL) {
    _restart_addr = low;
  } else {
    _restart_addr = MIN2(_restart_addr, low);
  }
}

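// Descriptive note (not from the original sources): _restart_addr only ever
// moves downward, so after any number of stack overflows it records the
// least address whose marking work may have been discarded; restarting the
// bit-map scan from there conservatively recovers all of the lost work.
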
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _markStack->reset();   // discard stack contents
  _markStack->expand();  // expand the stack if possible
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the expansion below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();   // discard stack contents
  _overflow_stack->expand();  // expand the stack if possible
}


void PushOrMarkClosure::do_oop(oop* p) {
  oop thisOop = *p;
  // Ignore mark word because we are running concurrent with mutators.
  assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)thisOop;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);  // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(thisOop)) {  // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}

void Par_PushOrMarkClosure::do_oop(oop* p) {
  oop this_oop = *p;
  // Ignore mark word because we are running concurrent with mutators.
  assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)this_oop;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    // We read the global_finger (volatile read) strictly after marking oop
    bool res = _bit_map->par_mark(addr);  // now grey
    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
    // Should we push this marked oop on our stack?
    // -- if someone else marked it, nothing to do
    // -- if target oop is above global finger nothing to do
    // -- if target oop is in chunk and above local finger
    //    then nothing to do
    // -- else push on work queue
    if (   !res                // someone else marked it, they will deal with it
        || (addr >= *gfa)      // will be scanned in a later task
        || (_span.contains(addr) && addr >= _finger)) {  // later in this chunk
      return;
    }
    // the bit map iteration has already either passed, or
    // sampled, this bit in the bit map; we'll need to
    // use the marking stack to scan this oop's oops.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow ||
        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
      // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _overflow_stack->capacity());
      }
      // We cannot assert that the overflow stack is full because
      // it may have been emptied since.
      assert(simulate_overflow ||
             _work_queue->size() == _work_queue->max_elems(),
             "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    do_yield_check();
  }
}


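// Descriptive note (not from the original sources): the push above tries
// the worker's own queue first and falls back to the shared overflow
// stack; only if both fail is the address routed through
// handle_stack_overflow(), which records a restart point rather than
// losing the grey object.
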
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceProcessor* rp,
                                       CMSBitMap* bit_map,
                                       CMSBitMap* mod_union_table,
                                       CMSMarkStack* mark_stack,
                                       CMSMarkStack* revisit_stack,
                                       bool concurrent_precleaning):
  OopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mod_union_table(mod_union_table),
  _mark_stack(mark_stack),
  _revisit_stack(revisit_stack),
  _concurrent_precleaning(concurrent_precleaning),
  _should_remember_klasses(collector->cms_should_unload_classes())
{
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
void PushAndMarkClosure::do_oop(oop* p) {
  oop this_oop = *p;
  // Ignore mark word verification. If during concurrent precleaning,
  // the object monitor may be locked. If during the checkpoint
  // phases, the object may already have been reached by a different
  // path and may be at the end of the global overflow list (so
  // the mark word may be NULL).
  assert(this_oop->is_oop_or_null(true /* ignore mark word */),
         "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)this_oop;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    _bit_map->mark(addr);  // ... now grey
    // push on the marking stack (grey set)
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(this_oop)) {
      if (_concurrent_precleaning) {
        // During precleaning we can just dirty the appropriate card
        // in the mod union table, thus ensuring that the object remains
        // in the grey set and continue. Note that no one can be interfering
        // with us in this action of dirtying the mod union table, so
        // no locking is required.
        _mod_union_table->mark(addr);
        _collector->_ser_pmc_preclean_ovflw++;
      } else {
        // During the remark phase, we need to remember this oop
        // in the overflow list.
        _collector->push_on_overflow_list(this_oop);
        _collector->_ser_pmc_remark_ovflw++;
      }
    }
  }
}

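// Descriptive note (not from the original sources): the two overflow
// fallbacks above are deliberately different. During precleaning the card
// can simply be redirtied in the mod union table and will be rescanned
// later, so dropping the stack entry is harmless; during remark there is
// no later rescan, so the oop must be preserved on the global overflow
// list instead.
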
Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                               MemRegion span,
                                               ReferenceProcessor* rp,
                                               CMSBitMap* bit_map,
                                               OopTaskQueue* work_queue,
                                               CMSMarkStack* revisit_stack):
  OopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _revisit_stack(revisit_stack),
  _should_remember_klasses(collector->cms_should_unload_classes())
{
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// Grey object rescan during second checkpoint phase --
// the parallel version.
void Par_PushAndMarkClosure::do_oop(oop* p) {
  oop this_oop = *p;
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
  // its mark word will be NULL). Because this object may
  // have been subsequently popped off the global overflow
  // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
  // the debugger, is_oop_or_null(false) may subsequently start
  // to hold.
  assert(this_oop->is_oop_or_null(true),
         "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)this_oop;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {  // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(this_oop)) {
        _collector->par_push_on_overflow_list(this_oop);
        _collector->_par_pmc_remark_ovflw++;  // imprecise OK: no need to CAS
      }
    } // Else, some other thread got there first
  }
}

void PushAndMarkClosure::remember_klass(Klass* k) {
  if (!_revisit_stack->push(oop(k))) {
    fatal("Revisit stack overflowed in PushAndMarkClosure");
  }
}

void Par_PushAndMarkClosure::remember_klass(Klass* k) {
  if (!_revisit_stack->par_push(oop(k))) {
    fatal("Revisit stack overflowed in Par_PushAndMarkClosure");
  }
}

void CMSPrecleanRefsYieldClosure::do_yield_work() {
  Mutex* bml = _collector->bitMapLock();
  assert_lock_strong(bml);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");

  bml->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);

  ConcurrentMarkSweepThread::acknowledge_yield_request();

  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  bml->lock();

  _collector->startTimer();
}

bool CMSPrecleanRefsYieldClosure::should_return() {
  if (ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
  return _collector->foregroundGCIsActive();
}

void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
         "mr should be aligned to start at a card boundary");
  // We'd like to assert:
  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
  //        "mr should be a range of cards");
  // However, that would be too strong in one case -- the last
  // partition ends at _unallocated_block which, in general, can be
  // an arbitrary boundary, not necessarily card aligned.
  if (PrintCMSStatistics != 0) {
    _num_dirty_cards +=
         mr.word_size()/CardTableModRefBS::card_size_in_words;
  }
  _space->object_iterate_mem(mr, &_scan_cl);
}

SweepClosure::SweepClosure(CMSCollector* collector,
                           ConcurrentMarkSweepGeneration* g,
                           CMSBitMap* bitMap, bool should_yield) :
  _collector(collector),
  _g(g),
  _sp(g->cmsSpace()),
  _limit(_sp->sweep_limit()),
  _freelistLock(_sp->freelistLock()),
  _bitMap(bitMap),
  _yield(should_yield),
  _inFreeRange(false),           // No free range at beginning of sweep
  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
  _lastFreeRangeCoalesced(false),
  _freeFinger(g->used_region().start())
{
  NOT_PRODUCT(
    _numObjectsFreed = 0;
    _numWordsFreed   = 0;
    _numObjectsLive = 0;
    _numWordsLive = 0;
    _numObjectsAlreadyFree = 0;
    _numWordsAlreadyFree = 0;
    _last_fc = NULL;

    _sp->initializeIndexedFreeListArrayReturnedBytes();
    _sp->dictionary()->initializeDictReturnedBytes();
  )
  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
         "sweep _limit out of bounds");
  if (CMSTraceSweeper) {
    gclog_or_tty->print("\n====================\nStarting new sweep\n");
  }
}

// We need this destructor to reclaim any space at the end
// of the space, which do_blk below may not have added back to
// the free lists. [basically dealing with the "fringe effect"]
SweepClosure::~SweepClosure() {
  assert_lock_strong(_freelistLock);
  // this should be treated as the end of a free run if any
  // The current free range should be returned to the free lists
  // as one coalesced chunk.
  if (inFreeRange()) {
    flushCurFreeChunk(freeFinger(),
                      pointer_delta(_limit, freeFinger()));
    assert(freeFinger() < _limit, "the finger pointeth off base");
    if (CMSTraceSweeper) {
      gclog_or_tty->print("destructor:");
      gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
                          "[coalesced:"SIZE_FORMAT"]\n",
                          freeFinger(), pointer_delta(_limit, freeFinger()),
                          lastFreeRangeCoalesced());
    }
  }
  NOT_PRODUCT(
    if (Verbose && PrintGC) {
      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
                          SIZE_FORMAT " bytes",
                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
                             SIZE_FORMAT" bytes "
                             "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
                             _numObjectsLive, _numWordsLive*sizeof(HeapWord),
                             _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
                          sizeof(HeapWord);
      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);

      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
        gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
                            indexListReturnedBytes);
        gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
                               dictReturnedBytes);
      }
    }
  )
  // Now, in debug mode, just null out the sweep_limit
  NOT_PRODUCT(_sp->clear_sweep_limit();)
  if (CMSTraceSweeper) {
    gclog_or_tty->print("end of sweep\n================\n");
  }
}

void SweepClosure::initialize_free_range(HeapWord* freeFinger,
                                         bool freeRangeInFreeLists) {
  if (CMSTraceSweeper) {
    gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
                        freeFinger, _sp->block_size(freeFinger),
                        freeRangeInFreeLists);
  }
  assert(!inFreeRange(), "Trampling existing free range");
  set_inFreeRange(true);
  set_lastFreeRangeCoalesced(false);

  set_freeFinger(freeFinger);
  set_freeRangeInFreeLists(freeRangeInFreeLists);
  if (CMSTestInFreeList) {
    if (freeRangeInFreeLists) {
      FreeChunk* fc = (FreeChunk*) freeFinger;
      assert(fc->isFree(), "A chunk on the free list should be free.");
      assert(fc->size() > 0, "Free range should have a size");
      assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
    }
  }
}

// Note that the sweeper runs concurrently with mutators. Thus,
// it is possible for direct allocation in this generation to happen
// in the middle of the sweep. Note that the sweeper also coalesces
// contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
// This is accomplished by the sweeper locking the free lists while
// it is sweeping. Thus blocks that are determined to be free are
// indeed free. There is however one additional complication:
// blocks that have been allocated since the final checkpoint and
// mark, will not have been marked and so would be treated as
// unreachable and swept up. To prevent this, the allocator marks
// the bit map when allocating during the sweep phase. This leads,
// however, to a further complication -- objects may have been allocated
// but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
// in order to skip over it. To deal with this case, we use a technique
// (due to Printezis) to encode such uninitialized block sizes in the
// bit map. Since the bit map uses a bit for every HeapWord, but the
// CMS generation has a minimum object size of 3 HeapWords, it follows
// that "normal marks" won't be adjacent in the bit map (there will
// always be at least two 0 bits between successive 1 bits). We make use
// of these "unused" bits to represent uninitialized blocks -- the bit
// corresponding to the start of the uninitialized object and the next
// bit are both set. Finally, a 1 bit marks the end of the object that
// started with the two consecutive 1 bits to indicate its potentially
// uninitialized state.

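// Editor's illustrative sketch: the Printezis encoding described above can
// be decoded directly from the bit map. The helper below is hypothetical
// (doLiveChunk() further down performs the equivalent computation inline);
// it assumes addr starts an uninitialized block, i.e. both
// bm->isMarked(addr) and bm->isMarked(addr + 1) hold, so the next set bit
// at or beyond addr + 2 marks the last word of the block:
//
//   size_t printezis_block_size(CMSBitMap* bm, HeapWord* addr) {
//     HeapWord* last_word = bm->getNextMarkedWordAddress(addr + 2);
//     return pointer_delta(last_word + 1, addr);  // size in HeapWords
//   }
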
size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  FreeChunk* fc = (FreeChunk*)addr;
  size_t res;

  // check if we are done sweeping
  if (addr == _limit) { // we have swept up to the limit, do nothing more
    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
           "sweep _limit out of bounds");
    // help the closure application finish
    return pointer_delta(_sp->end(), _limit);
  }
  assert(addr <= _limit, "sweep invariant");

  // check if we should yield
  do_yield_check(addr);
  if (fc->isFree()) {
    // Chunk that is already free
    res = fc->size();
    doAlreadyFreeChunk(fc);
    debug_only(_sp->verifyFreeLists());
    assert(res == fc->size(), "Don't expect the size to change");
    NOT_PRODUCT(
      _numObjectsAlreadyFree++;
      _numWordsAlreadyFree += res;
    )
    NOT_PRODUCT(_last_fc = fc;)
  } else if (!_bitMap->isMarked(addr)) {
    // Chunk is fresh garbage
    res = doGarbageChunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
      _numObjectsFreed++;
      _numWordsFreed += res;
    )
  } else {
    // Chunk that is alive.
    res = doLiveChunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
      _numObjectsLive++;
      _numWordsLive += res;
    )
  }
  return res;
}

// For the smart allocation, record the following
// split deaths - a free chunk is removed from its free list because
//      it is being split into two or more chunks.
// split birth - a free chunk is being added to its free list because
//      a larger free chunk has been split and resulted in this free chunk.
// coal death - a free chunk is being removed from its free list because
//      it is being coalesced into a large free chunk.
// coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
//      this free chunk.
//
// These statistics are used to determine the desired number of free
// chunks of a given size. The desired number is chosen to be relative
// to the end of a CMS sweep. The desired number at the end of a sweep
// is the
//      count-at-end-of-previous-sweep (an amount that was enough)
//    - count-at-beginning-of-current-sweep (the excess)
//    + split-births (gains in this size during interval)
//    - split-deaths (demands on this size during interval)
// where the interval is from the end of one sweep to the end of the
// next.
//
// When sweeping the sweeper maintains an accumulated chunk which is
// the chunk that is made up of chunks that have been coalesced. That
// will be termed the left-hand chunk. A new chunk of garbage that
// is being considered for coalescing will be referred to as the
// right-hand chunk.
//
// When making a decision on whether to coalesce a right-hand chunk with
// the current left-hand chunk, the current count vs. the desired count
// of the left-hand chunk is considered. Also if the right-hand chunk
// is near the large chunk at the end of the heap (see
// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
// left-hand chunk is coalesced.
//
// When making a decision about whether to split a chunk, the desired count
// vs. the current count of the candidate to be split is also considered.
// If the candidate is underpopulated (currently fewer chunks than desired)
// a chunk of an overpopulated (currently more chunks than desired) size may
// be chosen. The "hint" associated with a free list, if non-null, points
// to a free list which may be overpopulated.
//

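// Editor's illustrative sketch: the desired-count computation described
// above, written out as a hypothetical helper (the real bookkeeping lives
// in the per-size free list statistics):
//
//   ssize_t desired_count_at_sweep_end(ssize_t count_at_end_of_prev_sweep,
//                                      ssize_t count_at_start_of_this_sweep,
//                                      ssize_t split_births,
//                                      ssize_t split_deaths) {
//     return count_at_end_of_prev_sweep    // an amount that was enough
//          - count_at_start_of_this_sweep  // the excess
//          + split_births                  // gains during the interval
//          - split_deaths;                 // demands during the interval
//   }
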
void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
  size_t size = fc->size();
  // Chunks that cannot be coalesced are not in the
  // free lists.
  if (CMSTestInFreeList && !fc->cantCoalesce()) {
    assert(_sp->verifyChunkInFreeLists(fc),
           "free chunk should be in free lists");
  }
  // a chunk that is already free, should not have been
  // marked in the bit map
  HeapWord* addr = (HeapWord*) fc;
  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  // Verify that the bit map has no bits marked between
  // addr and purported end of this block.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

  // Some chunks cannot be coalesced under any circumstances.
  // See the definition of cantCoalesce().
  if (!fc->cantCoalesce()) {
    // This chunk can potentially be coalesced.
    if (_sp->adaptive_freelists()) {
      // All the work is done in doPostIsFreeOrGarbageChunk().
      doPostIsFreeOrGarbageChunk(fc, size);
    } else {  // Not adaptive free lists
      // this is a free chunk that can potentially be coalesced by the sweeper;
      if (!inFreeRange()) {
        // if the next chunk is a free block that can't be coalesced
        // it doesn't make sense to remove this chunk from the free lists
        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
        assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
        if ((HeapWord*)nextChunk < _limit &&    // there's a next chunk...
            nextChunk->isFree() &&              // which is free...
            nextChunk->cantCoalesce()) {        // ... but can't be coalesced
          // nothing to do
        } else {
          // Potentially the start of a new free range:
          // Don't eagerly remove it from the free lists.
          // No need to remove it if it will just be put
          // back again. (Also from a pragmatic point of view
          // if it is a free block in a region that is beyond
          // any allocated blocks, an assertion will fail)
          // Remember the start of a free run.
          initialize_free_range(addr, true);
          // end - can coalesce with next chunk
        }
      } else {
        // the midst of a free range, we are coalescing
        debug_only(record_free_block_coalesced(fc);)
        if (CMSTraceSweeper) {
          gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
        }
        // remove it from the free lists
        _sp->removeFreeChunkFromFreeLists(fc);
        set_lastFreeRangeCoalesced(true);
        // If the chunk is being coalesced and the current free range is
        // in the free lists, remove the current free range so that it
        // will be returned to the free lists in its entirety - all
        // the coalesced pieces included.
        if (freeRangeInFreeLists()) {
          FreeChunk* ffc = (FreeChunk*) freeFinger();
          assert(ffc->size() == pointer_delta(addr, freeFinger()),
                 "Size of free range is inconsistent with chunk size.");
          if (CMSTestInFreeList) {
            assert(_sp->verifyChunkInFreeLists(ffc),
                   "free range is not in free lists");
          }
          _sp->removeFreeChunkFromFreeLists(ffc);
          set_freeRangeInFreeLists(false);
        }
      }
    }
  } else {
    // Code path common to both original and adaptive free lists.

    // can't coalesce with previous block; this should be treated
    // as the end of a free run if any
    if (inFreeRange()) {
      // we kicked some butt; time to pick up the garbage
      assert(freeFinger() < addr, "the finger pointeth off base");
      flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
    }
    // else, nothing to do, just continue
  }
}

size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
  // This is a chunk of garbage. It is not in any free list.
  // Add it to a free list or let it possibly be coalesced into
  // a larger chunk.
  HeapWord* addr = (HeapWord*) fc;
  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());

  if (_sp->adaptive_freelists()) {
    // Verify that the bit map has no bits marked between
    // addr and purported end of just dead object.
    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

    doPostIsFreeOrGarbageChunk(fc, size);
  } else {
    if (!inFreeRange()) {
      // start of a new free range
      assert(size > 0, "A free range should have a size");
      initialize_free_range(addr, false);

    } else {
      // this will be swept up when we hit the end of the
      // free range
      if (CMSTraceSweeper) {
        gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
      }
      // If the chunk is being coalesced and the current free range is
      // in the free lists, remove the current free range so that it
      // will be returned to the free lists in its entirety - all
      // the coalesced pieces included.
      if (freeRangeInFreeLists()) {
        FreeChunk* ffc = (FreeChunk*)freeFinger();
        assert(ffc->size() == pointer_delta(addr, freeFinger()),
               "Size of free range is inconsistent with chunk size.");
        if (CMSTestInFreeList) {
          assert(_sp->verifyChunkInFreeLists(ffc),
                 "free range is not in free lists");
        }
        _sp->removeFreeChunkFromFreeLists(ffc);
        set_freeRangeInFreeLists(false);
      }
      set_lastFreeRangeCoalesced(true);
    }
    // this will be swept up when we hit the end of the free range

    // Verify that the bit map has no bits marked between
    // addr and purported end of just dead object.
    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  }
  return size;
}

size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
  HeapWord* addr = (HeapWord*) fc;
  // The sweeper has just found a live object. Return any accumulated
  // left hand chunk to the free lists.
  if (inFreeRange()) {
    if (_sp->adaptive_freelists()) {
      flushCurFreeChunk(freeFinger(),
                        pointer_delta(addr, freeFinger()));
    } else { // not adaptive freelists
      set_inFreeRange(false);
      // Add the free range back to the free list if it is not already
      // there.
      if (!freeRangeInFreeLists()) {
        assert(freeFinger() < addr, "the finger pointeth off base");
        if (CMSTraceSweeper) {
          gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
                              "[coalesced:%d]\n",
                              freeFinger(), pointer_delta(addr, freeFinger()),
                              lastFreeRangeCoalesced());
        }
        _sp->addChunkAndRepairOffsetTable(freeFinger(),
          pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
      }
    }
  }

  // Common code path for original and adaptive free lists.

  // this object is live: we'd normally expect this to be
  // an oop, and would like to assert the following:
  // assert(oop(addr)->is_oop(), "live block should be an oop");
  // However, as we commented above, this may be an object whose
  // header hasn't yet been initialized.
  size_t size;
  assert(_bitMap->isMarked(addr), "Tautology for this control point");
  if (_bitMap->isMarked(addr + 1)) {
    // Determine the size from the bit map, rather than trying to
    // compute it from the object header.
    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
    size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");

#ifdef DEBUG
    if (oop(addr)->klass() != NULL &&
        (   !_collector->cms_should_unload_classes()
         || oop(addr)->is_parsable())) {
      // Ignore mark word because we are running concurrent with mutators
      assert(oop(addr)->is_oop(true), "live block should be an oop");
      assert(size ==
             CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
             "P-mark and computed size do not agree");
    }
#endif

  } else {
    // This should be an initialized object that's alive.
    assert(oop(addr)->klass() != NULL &&
           (!_collector->cms_should_unload_classes()
            || oop(addr)->is_parsable()),
           "Should be an initialized object");
    // Ignore mark word because we are running concurrent with mutators
    assert(oop(addr)->is_oop(true), "live block should be an oop");
    // Verify that the bit map has no bits marked between
    // addr and purported end of this block.
    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
    assert(size >= 3, "Necessary for Printezis marks to work");
    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  }
  return size;
}

void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
                                              size_t chunkSize) {
  // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
  // scheme.
  bool fcInFreeLists = fc->isFree();
  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
  assert((HeapWord*)fc <= _limit, "sweep invariant");
  if (CMSTestInFreeList && fcInFreeLists) {
    assert(_sp->verifyChunkInFreeLists(fc),
           "free chunk is not in free lists");
  }

  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
  }

  HeapWord* addr = (HeapWord*) fc;

  bool coalesce;
  size_t left  = pointer_delta(addr, freeFinger());
  size_t right = chunkSize;
  switch (FLSCoalescePolicy) {
    // numeric value forms a coalition aggressiveness metric
    case 0:  { // never coalesce
      coalesce = false;
      break;
    }
    case 1: { // coalesce if left & right chunks on overpopulated lists
      coalesce = _sp->coalOverPopulated(left) &&
                 _sp->coalOverPopulated(right);
      break;
    }
    case 2: { // coalesce if left chunk on overpopulated list (default)
      coalesce = _sp->coalOverPopulated(left);
      break;
    }
    case 3: { // coalesce if left OR right chunk on overpopulated list
      coalesce = _sp->coalOverPopulated(left) ||
                 _sp->coalOverPopulated(right);
      break;
    }
    case 4: { // always coalesce
      coalesce = true;
      break;
    }
    default:
      ShouldNotReachHere();
  }

  // Should the current free range be coalesced?
  // If the chunk is in a free range and either we decided to coalesce above
  // or the chunk is near the large block at the end of the heap
  // (isNearLargestChunk() returns true), then coalesce this chunk.
  bool doCoalesce = inFreeRange() &&
    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
  if (doCoalesce) {
    // Coalesce the current free range on the left with the new
    // chunk on the right. If either is on a free list,
    // it must be removed from the list and stashed in the closure.
    if (freeRangeInFreeLists()) {
      FreeChunk* ffc = (FreeChunk*)freeFinger();
      assert(ffc->size() == pointer_delta(addr, freeFinger()),
             "Size of free range is inconsistent with chunk size.");
      if (CMSTestInFreeList) {
        assert(_sp->verifyChunkInFreeLists(ffc),
               "Chunk is not in free lists");
      }
      _sp->coalDeath(ffc->size());
      _sp->removeFreeChunkFromFreeLists(ffc);
      set_freeRangeInFreeLists(false);
    }
    if (fcInFreeLists) {
      _sp->coalDeath(chunkSize);
      assert(fc->size() == chunkSize,
             "The chunk has the wrong size or is not in the free lists");
      _sp->removeFreeChunkFromFreeLists(fc);
    }
    set_lastFreeRangeCoalesced(true);
  } else {  // not in a free range and/or should not coalesce
    // Return the current free range and start a new one.
    if (inFreeRange()) {
      // In a free range but cannot coalesce with the right hand chunk.
      // Put the current free range into the free lists.
      flushCurFreeChunk(freeFinger(),
                        pointer_delta(addr, freeFinger()));
    }
    // Set up for new free range. Pass along whether the right hand
    // chunk is in the free lists.
    initialize_free_range((HeapWord*)fc, fcInFreeLists);
  }
}

void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  assert(size > 0,
         "A zero sized chunk cannot be added to the free lists.");
  if (!freeRangeInFreeLists()) {
    if (CMSTestInFreeList) {
      FreeChunk* fc = (FreeChunk*) chunk;
      fc->setSize(size);
      assert(!_sp->verifyChunkInFreeLists(fc),
             "chunk should not be in free lists yet");
    }
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
                             chunk, size);
    }
    // A new free range is going to be starting. The current
    // free range has not been added to the free lists yet or
    // was removed so add it back.
    // If the current free range was coalesced, then the death
    // of the free range was recorded. Record a birth now.
    if (lastFreeRangeCoalesced()) {
      _sp->coalBirth(size);
    }
    _sp->addChunkAndRepairOffsetTable(chunk, size,
                                      lastFreeRangeCoalesced());
  }
  set_inFreeRange(false);
  set_freeRangeInFreeLists(false);
}

// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void SweepClosure::do_yield_work(HeapWord* addr) {
  // Return current free chunk being used for coalescing (if any)
  // to the appropriate freelist. After yielding, the next
  // free block encountered will start a coalescing range of
  // free blocks. If the next free block is adjacent to the
  // chunk just flushed, they will need to wait for the next
  // sweep to be coalesced.
  if (inFreeRange()) {
    flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // First give up the locks, then yield, then re-lock.
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bitMap->lock());
  assert_lock_strong(_freelistLock);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

#ifndef PRODUCT
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
  return debug_cms_space->verifyChunkInFreeLists(fc);
}

void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
  if (CMSTraceSweeper) {
    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
  }
}
#endif

// CMSIsAliveClosure
bool CMSIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_span.contains(addr) || _bit_map->isMarked(addr));
}

// CMSKeepAliveClosure: the serial version
void CMSKeepAliveClosure::do_oop(oop* p) {
  oop this_oop = *p;
  HeapWord* addr = (HeapWord*)this_oop;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    _bit_map->mark(addr);
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(this_oop)) {
      _collector->push_on_overflow_list(this_oop);
      _collector->_ser_kac_ovflw++;
    }
  }
}

// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
void CMSParKeepAliveClosure::do_oop(oop* p) {
  oop this_oop = *p;
  HeapWord* addr = (HeapWord*)this_oop;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    // In general, during recursive tracing, several threads
    // may be concurrently getting here; the first one to
    // "tag" it, claims it.
    if (_bit_map->par_mark(addr)) {
      bool res = _work_queue->push(this_oop);
      assert(res, "Low water mark should be much less than capacity");
      // Do a recursive trim in the hope that this will keep
      // stack usage lower, but leave some oops for potential stealers
      trim_queue(_low_water_mark);
    } // Else, another thread got there first
  }
}

void CMSParKeepAliveClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}

void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
  oop this_oop = *p;
  HeapWord* addr = (HeapWord*)this_oop;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    if (_bit_map->par_mark(addr)) {
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(this_oop)) {
        _collector->par_push_on_overflow_list(this_oop);
        _collector->_par_kac_ovflw++;
      }
    } // Else another thread got there already
  }
}

//////////////////////////////////////////////////////////////////
//  CMSExpansionCause             /////////////////////////////////
//////////////////////////////////////////////////////////////////
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
  switch (cause) {
    case _no_expansion:
      return "No expansion";
    case _satisfy_free_ratio:
      return "Free ratio";
    case _satisfy_promotion:
      return "Satisfy promotion";
    case _satisfy_allocation:
      return "allocation";
    case _allocate_par_lab:
      return "Par LAB";
    case _allocate_par_spooling_space:
      return "Par Spooling Space";
    case _adaptive_size_policy:
      return "Ergonomics";
    default:
      return "unknown";
  }
}

void CMSDrainMarkingStackClosure::do_void() {
  // the max number to take from overflow list at a time
  const size_t num = _mark_stack->capacity()/4;
  while (!_mark_stack->isEmpty() ||
         // if stack is empty, check the overflow list
         _collector->take_from_overflow_list(num, _mark_stack)) {
    oop this_oop = _mark_stack->pop();
    HeapWord* addr = (HeapWord*)this_oop;
    assert(_span.contains(addr), "Should be within span");
    assert(_bit_map->isMarked(addr), "Should be marked");
    assert(this_oop->is_oop(), "Should be an oop");
    this_oop->oop_iterate(_keep_alive);
  }
}

void CMSParDrainMarkingStackClosure::do_void() {
  // drain queue
  trim_queue(0);
}

// Trim our work_queue so its length is below max at return
void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}

////////////////////////////////////////////////////////////////////
// Support for Marking Stack Overflow list handling and related code
////////////////////////////////////////////////////////////////////
// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try and share that code
// as much as possible in the future.

#ifndef PRODUCT
// Debugging support for CMSStackOverflowALot

// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool CMSCollector::simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = CMSMarkStackOverflowInterval;
    return true;
  } else {
    return false;
  }
}

bool CMSCollector::par_simulate_overflow() {
  return simulate_overflow();
}
#endif

// Single-threaded
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  size_t i = num;
  oop cur = _overflow_list;
  const markOop proto = markOopDesc::prototype();
  NOT_PRODUCT(size_t n = 0;)
  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = stack->push(cur);
    assert(res, "Bit off more than can chew?");
    NOT_PRODUCT(n++;)
  }
  _overflow_list = cur;
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
#endif
  return !stack->isEmpty();
}

// Multi-threaded; use CAS to break off a prefix
bool CMSCollector::par_take_from_overflow_list(size_t num,
                                               OopTaskQueue* work_q) {
  assert(work_q->size() == 0, "That's the current policy");
  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  if (_overflow_list == NULL) {
    return false;
  }
  // Grab the entire list; we'll put back a suffix
  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
  if (prefix == NULL) {  // someone grabbed it before we did ...
    // ... we could spin for a short while, but for now we don't
    return false;
  }
  size_t i = num;
  oop cur = prefix;
  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
  if (cur->mark() != NULL) {
    oop suffix_head = cur->mark(); // suffix will be put back on global list
    cur->set_mark(NULL);           // break off suffix
    // Find tail of suffix so we can prepend suffix to global list
    for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
    oop suffix_tail = cur;
    assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
           "Tautology");
    oop observed_overflow_list = _overflow_list;
    do {
      cur = observed_overflow_list;
      suffix_tail->set_mark(markOop(cur));
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
    } while (cur != observed_overflow_list);
  }

  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markOop proto = markOopDesc::prototype();
  oop next;
  NOT_PRODUCT(size_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}

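// Editor's illustrative sketch (hypothetical caller): a parallel worker
// would typically alternate between refilling its local queue from the
// shared overflow list and draining it, along the lines of:
//
//   while (collector->par_take_from_overflow_list(num, work_q)) {
//     oop obj;
//     while (work_q->pop_local(obj)) {
//       obj->oop_iterate(&mark_and_push);  // may push further work
//     }
//   }
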
// Single-threaded
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(p->is_oop(), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark((markOop)_overflow_list);
  _overflow_list = p;
}

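// Editor's illustrative sketch: the overflow list is threaded through the
// objects' mark words, so a traversal simply chases mark words until the
// NULL terminator, exactly as take_from_overflow_list() does above. A
// hypothetical debug helper to count its length:
//
//   size_t overflow_list_length(oop head) {
//     size_t n = 0;
//     for (oop cur = head; cur != NULL; cur = oop(cur->mark())) {
//       n++;
//     }
//     return n;
//   }
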
// Multi-threaded; use CAS to prepend to overflow list
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  assert(p->is_oop(), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    p->set_mark(markOop(cur_overflow_list));
    observed_overflow_list =
      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}

// Single threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// a lot of code in the JVM. The prudent thing for GrowableArray
// to do (for now) is to exit with an error. However, that may
// be too draconian in some cases because the caller may be
// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method which returns
// an indication of success or failure with the assumption that
// the caller may be able to recover from a failure; code in
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations. (A sketch of the idea follows
// preserve_mark_work() below.)
void CMSCollector::preserve_mark_work(oop p, markOop m) {
  int PreserveMarkStackSize = 128;

  if (_preserved_oop_stack == NULL) {
    assert(_preserved_mark_stack == NULL,
           "bijection with preserved_oop_stack");
    // Allocate the stacks
    _preserved_oop_stack  = new (ResourceObj::C_HEAP)
      GrowableArray<oop>(PreserveMarkStackSize, true);
    _preserved_mark_stack = new (ResourceObj::C_HEAP)
      GrowableArray<markOop>(PreserveMarkStackSize, true);
    if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
      vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
                            "Preserved Mark/Oop Stack for CMS (C-heap)");
    }
  }
  _preserved_oop_stack->push(p);
  _preserved_mark_stack->push(m);
  assert(m == p->mark(), "Mark word changed");
  assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
         "bijection");
}

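// Editor's illustrative sketch of the "soft_push" idea mentioned before
// preserve_mark_work() -- a hypothetical API, not something GrowableArray
// provides: the caller gets back an indication of failure and can recover
// instead of exiting the VM.
//
//   template <typename T>
//   bool soft_push(GrowableArray<T>* stack, T elem) {
//     if (!stack->try_reserve(1)) {  // hypothetical non-fatal expansion
//       return false;                // caller recovers, e.g. falls back
//     }                              // to an overflow list
//     stack->push(elem);
//     return true;
//   }
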
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    preserve_mark_work(p, m);
  }
}

void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    // Even though we read the mark word without holding
    // the lock, we are assured that it will not change
    // because we "own" this oop, so no other thread can
    // be trying to push it on the overflow list; see
    // the assertion in preserve_mark_work() that checks
    // that m == p->mark().
    preserve_mark_work(p, m);
  }
}

// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice and, when it happens, its
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  if (_preserved_oop_stack == NULL) {
    assert(_preserved_mark_stack == NULL,
           "bijection with preserved_oop_stack");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");

  int length = _preserved_oop_stack->length();
  assert(_preserved_mark_stack->length() == length, "bijection");
  for (int i = 0; i < length; i++) {
    oop p = _preserved_oop_stack->at(i);
    assert(p->is_oop(), "Should be an oop");
    assert(_span.contains(p), "oop should be in _span");
    assert(p->mark() == markOopDesc::prototype(),
           "Set when taken from overflow list");
    markOop m = _preserved_mark_stack->at(i);
    p->set_mark(m);
  }
  _preserved_mark_stack->clear();
  _preserved_oop_stack->clear();
  assert(_preserved_mark_stack->is_empty() &&
         _preserved_oop_stack->is_empty(),
         "stacks were cleared above");
}

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return (   (   _preserved_mark_stack == NULL
              && _preserved_oop_stack == NULL)
          || (   _preserved_mark_stack->is_empty()
              && _preserved_oop_stack->is_empty()));
}
#endif

CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
{
  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type for size policy");
  return size_policy;
}

void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
                                             size_t desired_promo_size) {
  if (cur_promo_size < desired_promo_size) {
    size_t expand_bytes = desired_promo_size - cur_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    expand(expand_bytes,
           MinHeapDeltaBytes,
           CMSExpansionCause::_adaptive_size_policy);
  } else if (desired_promo_size < cur_promo_size) {
    size_t shrink_bytes = cur_promo_size - desired_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
        shrink_bytes);
    }
    shrink(shrink_bytes);
  }
}

CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSGCAdaptivePolicyCounters* counters =
    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
         "Wrong kind of counters");
  return counters;
}

void ASConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();

    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

// The desired expansion delta is computed so that:
// . desired free percentage or greater is used
void ASConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  int prev_level = level() - 1;
  assert(prev_level >= 0, "The cms generation is the lowest generation");
  Generation* prev_gen = gch->get_gen(prev_level);
  assert(prev_gen->kind() == Generation::ASParNew,
         "Wrong type of young generation");
  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  size_t cur_eden = younger_gen->eden()->capacity();
  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  size_t cur_promo = free();
  size_policy->compute_tenured_generation_free_space(cur_promo,
                                                     max_available(),
                                                     cur_eden);
  resize(cur_promo, size_policy->promo_size());

  // Record the new size of the space in the cms generation
  // that is available for promotions. This is temporary.
  // It should be the desired promo size.
  size_policy->avg_cms_promo()->sample(free());
  size_policy->avg_old_live()->sample(used());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    counters->update_cms_capacity_counter(capacity());
  }
}

void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  HeapWord* old_end = _cmsSpace->end();
  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
  FreeChunk* chunk_at_end = find_chunk_at_end();
  if (chunk_at_end == NULL) {
    // No room to shrink
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("No room to shrink: old_end "
        PTR_FORMAT " unallocated_start " PTR_FORMAT
        " chunk_at_end " PTR_FORMAT,
        old_end, unallocated_start, chunk_at_end);
    }
    return;
  } else {

    // Find the chunk at the end of the space and determine
    // how much it can be shrunk.
    size_t shrinkable_size_in_bytes = chunk_at_end->size();
    size_t aligned_shrinkable_size_in_bytes =
      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
    assert(unallocated_start <= chunk_at_end->end(),
           "Inconsistent chunk at end of space");
    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
    size_t word_size_before = heap_word_size(_virtual_space.committed_size());

    // Shrink the underlying space
    _virtual_space.shrink_by(bytes);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
        " desired_bytes " SIZE_FORMAT
        " shrinkable_size_in_bytes " SIZE_FORMAT
        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
        " bytes " SIZE_FORMAT,
        desired_bytes, shrinkable_size_in_bytes,
        aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
        " unallocated_start " SIZE_FORMAT,
        old_end, unallocated_start);
    }

    // If the space did shrink (shrinking is not guaranteed),
    // shrink the chunk at the end by the appropriate amount.
    if (((HeapWord*)_virtual_space.high()) < old_end) {
      size_t new_word_size =
        heap_word_size(_virtual_space.committed_size());

      // Have to remove the chunk from the dictionary because it is changing
      // size and might be somewhere else in the dictionary.

      // Get the chunk at end, shrink it, and put it
      // back.
      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
      size_t word_size_change = word_size_before - new_word_size;
      size_t chunk_at_end_old_size = chunk_at_end->size();
      assert(chunk_at_end_old_size >= word_size_change,
             "Shrink is too large");
      chunk_at_end->setSize(chunk_at_end_old_size -
                            word_size_change);
      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
                       word_size_change);

      _cmsSpace->returnChunkToDictionary(chunk_at_end);

      MemRegion mr(_cmsSpace->bottom(), new_word_size);
      _bts->resize(new_word_size);  // resize the block offset shared array
      Universe::heap()->barrier_set()->resize_covered_region(mr);
      _cmsSpace->assert_locked();
      _cmsSpace->set_end((HeapWord*)_virtual_space.high());

      NOT_PRODUCT(_cmsSpace->dictionary()->verify());

      // update the space and generation capacity counters
      if (UsePerfData) {
        _space_counters->update_capacity();
        _gen_counters->update_all();
      }

      if (Verbose && PrintGCDetails) {
        size_t new_mem_size = _virtual_space.committed_size();
        size_t old_mem_size = new_mem_size + bytes;
        gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
                               name(), old_mem_size/K, bytes/K, new_mem_size/K);
      }
    }

    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
           "Inconsistency at end of space");
    assert(chunk_at_end->end() == _cmsSpace->end(),
           "Shrinking is inconsistent");
    return;
  }
}

// Transfer some number of overflowed objects to the usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)_mark_stack->capacity()/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);

  bool res = _collector->take_from_overflow_list(num, _mark_stack);
  assert(_collector->overflow_list_is_empty() || res,
         "If list is not empty, we should have taken something");
  assert(!res || !_mark_stack->isEmpty(),
         "If we took something, it should now be on our stack");
  return res;
}

size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  size_t res = _sp->block_size_no_stall(addr, _collector);
  assert(res != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    if (_live_bit_map->isMarked(addr)) {
      // It can't have been dead in a previous cycle
      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
    } else {
      _dead_bit_map->mark(addr);      // mark the dead object
    }
  }
  return res;
}