/*
 * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

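// Overview: initialize() aligns the generation specs, reserves a single
// contiguous address range for all generations plus the permanent
// generation (and its shared misc data/code areas), creates the
// remembered set covering that range, and then lets each spec carve its
// piece out of the reservation, youngest generation first.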
jint GenCollectedHeap::initialize() {
  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
    collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}


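// Computes the total reservation needed for the heap: the max sizes of
// all generations, the perm gen, and the perm gen's shared misc data and
// code areas, rounded up for large pages when in use. Also accumulates
// the number of card-table "covered regions" the specs contribute. When
// shared spaces are in use, the return value is the base address the heap
// must occupy so that the shared region lands where the map file expects
// it; otherwise NULL is returned (any address will do). The totals and
// the resulting ReservedSpace are passed back through the out-parameters.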
char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs) {
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedSpace(total_reserved, alignment,
                           UseLargePages, heap_address);

  return heap_address;
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at "level" and lower,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

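// First-fit allocation policy: offer the request to each generation in
// turn, youngest first; any generation whose should_allocate() approves
// gets a chance to allocate. With "first_only" set, give up after the
// first candidate generation instead of falling through to older ones.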
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}

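// The stop-world collection driver. In outline: pick the starting level
// (a full collection starts at the oldest generation that claims to
// collect all younger ones), then for each level up to max_level that
// wants collecting: verify if requested, lazily preserve biased-locking
// marks the first time an in-place-marking generation is reached, set up
// reference discovery, run the generation's collect(), and enqueue any
// discovered references. Afterwards the collected generations (and, on a
// complete collection, the perm gen) recompute their sizes and the
// epilogue runs.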
void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) ref discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
          } else {
            // collect() will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
 public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
};
static AssertIsPermClosure assert_is_perm_closure;

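// Strong-root scanning for a collection at "level": process the shared
// strong roots, then treat all generations younger than "level" as
// strong roots (a task parallel workers claim exactly once), and finally
// scan the remembered sets of the generations older than "level" for
// references into younger generations.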
void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* older_gens,
                         OopsInGenClosure* not_older_gens) {
  // General strong roots.
  SharedHeap::process_strong_roots(collecting_perm_gen, so,
                                   not_older_gens, older_gens);

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)  \
void GenCollectedHeap::                                               \
oop_since_save_marks_iterate(int level,                               \
                             OopClosureType* cur,                     \
                             OopClosureType* older) {                 \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);         \
  for (int i = level+1; i < n_gens(); i++) {                          \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);         \
  }                                                                   \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);         \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

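// Inline contiguous allocation is delegated entirely to the youngest
// generation; top_addr()/end_addr() expose its allocation-window words,
// presumably so compiled code can emit the bump-the-pointer fast path
// against them.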
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

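// Dispatch for an externally requested collection: when CMS is in use
// with ExplicitGCInvokesConcurrent, eligible causes are run as a
// (mostly) concurrent full collection; everything else becomes a
// stop-the-world full collection over all generations.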
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// VM thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the VM thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,        // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                /* full */,
                clear_all_soft_refs /* clear_all_soft_refs */,
                0                   /* size */,
                false               /* is_tlab */,
                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
                  n_gens() - 1        /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC  ||
            VerifyDuringGC  ||
            VerifyBeforeExit ||
            VerifyAfterGC, "too expensive");
#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the youngest
// generation.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size  /* size */,
                                  false /* is_large_noref */,
                                  true  /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

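// The scratch-block lists donated by the generations are sorted below by
// repeated selection: removeSmallestScratch() unlinks the smallest
// remaining block and sort_scratch_list() pushes it onto the front of
// the result, so the final list ends up in decreasing size order.
// E.g. blocks of sizes 3,1,2 are removed in the order 1,2,3 and
// prepended to yield 3,2,1.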
// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;  // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest = cur;
      min_size = smallest->num_words;
      first = false;
    }
    prev_ptr = &cur->next;
    cur = cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void GenCollectedHeap::print() const { print_on(tty); }
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// New method to print perm gen info with PrintGCDetails flag.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);  // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_prologue(full);
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
  // Remember if a partial collection of the heap failed, and
  // we did a complete collection.
  if (full && incremental_collection_will_fail()) {
    set_last_incremental_collection_failed();
  } else {
    clear_last_incremental_collection_failed();
  }
  // Clear the flag, if set; the generation gc_epilogues will set the
  // flag again if the condition persists despite the collection.
  clear_incremental_collection_will_fail();

#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  always_do_update_barrier = UseConcMarkSweepGC;
}

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}

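// Promotion-failure fallback: first let each generation older than "gen"
// try to allocate the object without expanding; failing that, let "gen"
// and each successively older generation try to expand and allocate.
// On success, the object's body is copied to the new location.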
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size,
                                              oop* ref) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;  // in ms
  jlong _now;   // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeMillis();
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
  // XXX Despite the assert above, since javaTimeMillis()
  // does not guarantee monotonically increasing return
  // values (note, I didn't say "strictly monotonic"),
  // we need to guard against getting back a time
  // later than now. This should be fixed by basing
  // on something like gethrtime() which guarantees
  // monotonicity. Note that cond_wait() is susceptible
  // to a similar problem, because its interface is
  // based on absolute time in the form of the
  // system time's notion of UTC. See also 4506635
  // for yet another problem of similar nature. XXX
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}