/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

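// Print the min,max sizes (in KB) chosen for the perm, old, and young
// generations, plus their combined maximum, when -XX:+TracePageSizes is set.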
static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.
  ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                        og_align);
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_generation_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
    if (VerifyParallelOldWithMarkSweep) {
      // Will be used for verification of par old.
      PSMarkSweep::initialize();
    }
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

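// Push the current sizes and usage of each generation into its
// performance counters.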
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

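// Note that capacity() and used() cover only the young and old generations;
// the permanent generation is reported separately by permanent_capacity()
// and permanent_used() below.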
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

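// Estimate the largest heap that could ever be used: the reserved young and
// old space minus one survivor space (to-space is never used for allocation),
// but never less than the current capacity.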
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

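// A pointer is "in" the heap if it lies in the allocated part of any
// generation; is_in_reserved() checks the (larger) reserved regions instead.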
bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
                                 "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue; // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
                                 "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

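// An estimate of the largest allocation that could currently be satisfied
// without triggering a collection: the free space in eden.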
size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
  CollectedHeap::fill_all_tlabs(retire);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

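// Find the start of the object containing addr using the old/perm gen start
// arrays. Young gen lookups are not implemented, and addresses outside the
// heap return 0.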
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

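// Milliseconds since the last collection, as tracked by whichever full-gc
// implementation (parallel compact or mark-sweep) is in use.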
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

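// Apply the closure to each of the parallel GC worker threads.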
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}