/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

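// Print the min,max sizes of the perm, old and young gens (in KB), plus the
// sum of their maximums, when -XX:+TracePageSizes is enabled.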
static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

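  // With compressed oops, fall back through progressively less favorable heap
  // bases: unscaled narrow oops (tried above), then a zero-based encoding,
  // then an arbitrary (heap-based) address, re-reserving the space each time.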
  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

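// Note that capacity() and used() cover only the young and old gens; the perm
// gen is reported separately by permanent_capacity() and permanent_used()
// below.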
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

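// TLAB support: new TLABs are allocated in the young gen, and the capacity
// queries below delegate to its eden space.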
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
  CollectedHeap::fill_all_tlabs(retire);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

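// An address is the start of an object exactly when block_start() maps it to
// itself.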
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

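// Print used-before -> used-after (capacity): raw byte counts when both
// PrintGCDetails and Verbose are set, otherwise rounded to KB.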
void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif