author        | coleenp
date          | Sun, 13 Apr 2008 17:43:42 -0400
changeset 360 | 21d113ecbf6a
parent 252    | 050143a0dbfb
child 389     | a44227868a4a
permissions   | -rw-r--r--
/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psParallelCompact.cpp.incl"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2ChunkSize  = 9; // 512 words
const size_t ParallelCompactData::ChunkSize      = (size_t)1 << Log2ChunkSize;
const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;

// 32-bit: 128 words covers 4 bitmap words
// 64-bit: 128 words covers 2 bitmap words
const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockMask       = ~BlockOffsetMask;

const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
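
// Illustrative note (added commentary; not part of the original changeset):
// with Log2ChunkSize == 9, a heap address maps to its chunk metadata with a
// shift and a mask, e.g.
//
//   size_t idx = pointer_delta(addr, _region_start) >> Log2ChunkSize;
//   size_t off = pointer_delta(addr, _region_start) &  ChunkSizeOffsetMask;
//
// so each ChunkData entry describes a fixed 512-word slice of the heap
// (4KB with 8-byte HeapWords, 2KB with 4-byte HeapWords).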

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_shift = 27;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::los_mask = ~dc_mask;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
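
// Layout sketch (added commentary; not part of the original changeset): with
// dc_shift == 27, a chunk_sz_t packs the destination count and claim state
// into the top five bits and the live-obj size into the low 27 bits:
//
//   dc_mask      = 0xf8000000   // destination count / state field
//   dc_one       = 0x08000000   // a destination count of one
//   dc_claimed   = 0x40000000   // chunk has been claimed
//   dc_completed = 0x60000000   // chunk has been filled
//   los_mask     = 0x07ffffff   // live-obj size, in words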

#ifdef ASSERT
short ParallelCompactData::BlockData::_cur_phase = 0;
#endif

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
bool      PSParallelCompact::_print_phases = false;

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
klassOop            PSParallelCompact::_updated_int_array_klass_obj = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

#ifdef VALIDATE_MARK_SWEEP
GrowableArray<void*>*  PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t                 PSParallelCompact::_live_oops_index = 0;
size_t                 PSParallelCompact::_live_oops_index_at_perm = 0;
GrowableArray<void*>*  PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<void*>*  PSParallelCompact::_adjusted_pointers = NULL;
bool                   PSParallelCompact::_pointer_tracking = false;
bool                   PSParallelCompact::_root_tracking = true;

GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_cur_gc_live_oops_size = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_last_gc_live_oops_size = NULL;
#endif

// XXX beg - verification code; only works while we also mark in object headers
static void
verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();

  PSPermGen* perm_gen = heap->perm_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* perm_space = perm_gen->object_space();
  MutableSpace* old_space = old_gen->object_space();
  MutableSpace* eden_space = young_gen->eden_space();
  MutableSpace* from_space = young_gen->from_space();
  MutableSpace* to_space = young_gen->to_space();

  // 'from_space' here is the survivor space at the lower address.
  if (to_space->bottom() < from_space->bottom()) {
    from_space = to_space;
    to_space = young_gen->from_space();
  }

  HeapWord* boundaries[12];
  unsigned int bidx = 0;
  const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);

  boundaries[0] = perm_space->bottom();
  boundaries[1] = perm_space->top();
  boundaries[2] = old_space->bottom();
  boundaries[3] = old_space->top();
  boundaries[4] = eden_space->bottom();
  boundaries[5] = eden_space->top();
  boundaries[6] = from_space->bottom();
  boundaries[7] = from_space->top();
  boundaries[8] = to_space->bottom();
  boundaries[9] = to_space->top();
  boundaries[10] = to_space->end();
  boundaries[11] = to_space->end();

  BitMap::idx_t beg_bit = 0;
  BitMap::idx_t end_bit;
  BitMap::idx_t tmp_bit;
  const BitMap::idx_t last_bit = _mark_bitmap.size();
  do {
    HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
    if (_mark_bitmap.is_marked(beg_bit)) {
      oop obj = (oop)addr;
      assert(obj->is_gc_marked(), "obj header is not marked");
      end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
      const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
      assert(size == (size_t)obj->size(), "end bit wrong?");
      beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
      assert(beg_bit > end_bit, "bit set in middle of an obj");
    } else {
      if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
        // a dead object in the current space.
        oop obj = (oop)addr;
        end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
        assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
        tmp_bit = beg_bit + 1;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set in unmarked obj");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set in unmarked obj");
      } else if (addr < boundaries[bidx + 2]) {
        // addr is between top in the current space and bottom in the next.
        end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set above top");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set above top");
        bidx += 2;
      } else if (bidx < bidx_max - 2) {
        bidx += 2; // ???
      } else {
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "beg bit set outside heap");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "end bit set outside heap");
      }
    }
  } while (beg_bit < last_bit);
}
// XXX end - verification code; only works while we also mark in object headers

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "perm", "old ", "eden", "from", "to "
};

void PSParallelCompact::print_chunk_ranges()
{
  tty->print_cr("space  bottom     top        end        new_top");
  tty->print_cr("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    tty->print_cr("%u %s "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
                  id, space_names[id],
                  summary_data().addr_to_chunk_idx(space->bottom()),
                  summary_data().addr_to_chunk_idx(space->top()),
                  summary_data().addr_to_chunk_idx(space->end()),
                  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
{
#define CHUNK_IDX_FORMAT  SIZE_FORMAT_W("7")
#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
  tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
                i, c->data_location(), dci, c->destination(),
                c->partial_obj_size(), c->live_obj_size(),
                c->data_size(), c->source_chunk(), c->destination_count());

#undef CHUNK_IDX_FORMAT
#undef CHUNK_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_chunk_idx(beg_addr);
  const size_t last = summary_data.addr_to_chunk_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::ChunkData* c = summary_data.chunk(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_chunk(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_chunk(size_t i,
                            const ParallelCompactData::ChunkData* c,
                            bool newline = true)
{
  tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
             i, c->destination(),
             c->partial_obj_size(), c->live_obj_size(),
             c->data_size(), c->source_chunk(), c->destination_count());
  if (newline) tty->cr();
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t chunk_size = ParallelCompactData::ChunkSize;
  HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
  const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
  const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full chunks at the beginning of the space.
  size_t full_chunk_count = 0;
  size_t i = summary_data.addr_to_chunk_idx(space->bottom());
  while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
    ++full_chunk_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_chunk_count * chunk_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_chunk = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for chunks while there is something live in
  // the chunk or to the right of it.  The remaining chunks are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_chunk && live_to_right > 0) {
    c = summary_data.chunk(i);
    HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_chunk = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    print_initial_summary_chunk(i, c, false);
    tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
                  reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining chunks are empty.  Print one more if there is one.
  if (i < end_chunk) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
  }

  tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
                "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
                max_reclaimed_ratio_chunk, max_dead_to_right,
                max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  unsigned int id = PSParallelCompact::perm_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _chunk_vspace = 0;
  _chunk_data = 0;
  _chunk_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(chunk_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & ChunkSizeOffsetMask) == 0,
         "region size not a multiple of ChunkSize");

  bool result = initialize_chunk_data(region_size);

  // Initialize the block data if it will be used for updating pointers, or if
  // this is a debug build.
  if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
    result = result && initialize_block_data(region_size);
  }

  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(bytes, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());
  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(bytes)) {
      return vspace;
    }
    delete vspace;
  }

  return 0;
}

bool ParallelCompactData::initialize_chunk_data(size_t region_size)
{
  const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
  _chunk_vspace = create_vspace(count, sizeof(ChunkData));
  if (_chunk_vspace != 0) {
    _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
    _chunk_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data(size_t region_size)
{
  const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  if (_block_data) {
    memset(_block_data, 0, _block_vspace->committed_size());
  }
  memset(_chunk_data, 0, _chunk_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
  assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
  assert(end_chunk <= _chunk_count, "end_chunk out of range");
  assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");

  const size_t chunk_cnt = end_chunk - beg_chunk;

  if (_block_data) {
    const size_t blocks_per_chunk = ChunkSize / BlockSize;
    const size_t beg_block = beg_chunk * blocks_per_chunk;
    const size_t block_cnt = chunk_cnt * blocks_per_chunk;
    memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
  }
  memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
{
  const ChunkData* cur_cp = chunk(chunk_idx);
  const ChunkData* const end_cp = chunk(chunk_count() - 1);

  HeapWord* result = chunk_to_addr(chunk_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
  }
  return result;
}

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
  const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_chunk == end_chunk) {
    // All in one chunk.
    _chunk_data[beg_chunk].add_live_obj(len);
    return;
  }

  // First chunk.
  const size_t beg_ofs = chunk_offset(addr);
  _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);

  klassOop klass = ((oop)addr)->klass();
  // Middle chunks--completely spanned by this object.
  for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
    _chunk_data[chunk].set_partial_obj_size(ChunkSize);
    _chunk_data[chunk].set_partial_obj_addr(addr);
  }

  // Last chunk.
  const size_t end_ofs = chunk_offset(addr + len - 1);
  _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
  _chunk_data[end_chunk].set_partial_obj_addr(addr);
}
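
// Worked example (added commentary; not part of the original changeset):
// with ChunkSize == 512 words, an 1100-word object starting 100 words into
// chunk 7 is recorded as
//   chunk 7:  live_obj_size    += 412   (ChunkSize - beg_ofs)
//   chunk 8:  partial_obj_size  = 512   (fully spanned)
//   chunk 9:  partial_obj_size  = 176   (end_ofs + 1 remaining words)
// and chunks 8 and 9 get partial_obj_addr == the object's start address.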

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
  assert(chunk_offset(end) == 0, "not ChunkSize aligned");

  size_t cur_chunk = addr_to_chunk_idx(beg);
  const size_t end_chunk = addr_to_chunk_idx(end);
  HeapWord* addr = beg;
  while (cur_chunk < end_chunk) {
    _chunk_data[cur_chunk].set_destination(addr);
    _chunk_data[cur_chunk].set_destination_count(0);
    _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
    _chunk_data[cur_chunk].set_data_location(addr);

    // Update live_obj_size so the chunk appears completely full.
    size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
    _chunk_data[cur_chunk].set_live_obj_size(live_size);

    ++cur_chunk;
    addr += ChunkSize;
  }
}
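
// Note (added commentary; not part of the original changeset): for a chunk
// in the dense prefix the code above forces destination == data_location and
// partial_obj_size + live_obj_size == ChunkSize, so the chunk is treated as
// completely full, compacts onto itself, and is never copied.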

bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** target_next,
                                    HeapWord** source_next) {
  // This is too strict.
  // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
                  target_beg, target_end,
                  source_beg, source_end,
                  target_next != 0 ? *target_next : (HeapWord*) 0,
                  source_next != 0 ? *source_next : (HeapWord*) 0);
  }

  size_t cur_chunk = addr_to_chunk_idx(source_beg);
  const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_chunk < end_chunk) {
    size_t words = _chunk_data[cur_chunk].data_size();

#if 1
    assert(pointer_delta(target_end, dest_addr) >= words,
           "source region does not fit into target region");
#else
    // XXX - need some work on the corner cases here.  If the chunk does not
    // fit, then must either make sure any partial_obj from the chunk fits, or
    // 'undo' the initial part of the partial_obj that is in the previous
    // chunk.
    if (dest_addr + words >= target_end) {
      // Let the caller know where to continue.
      *target_next = dest_addr;
      *source_next = chunk_to_addr(cur_chunk);
      return false;
    }
#endif  // #if 1

    _chunk_data[cur_chunk].set_destination(dest_addr);

    // Set the destination_count for cur_chunk, and if necessary, update
    // source_chunk for a destination chunk.  The source_chunk field is
    // updated if cur_chunk is the first (left-most) chunk to be copied to a
    // destination chunk.
    //
    // The destination_count calculation is a bit subtle.  A chunk that has
    // data that compacts into itself does not count itself as a destination.
    // This maintains the invariant that a zero count means the chunk is
    // available and can be claimed and then filled.
    if (words > 0) {
      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
      const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
#if 0
      // Initially assume that the destination chunks will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted
      // completely into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
      if (dest_chunk_1 != dest_chunk_2) {
        // Destination chunks differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else if (chunk_offset(dest_addr) == 0) {
        // Data from cur_chunk will be copied to the start of the destination
        // chunk.
        _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
      }
#else
      // Initially assume that the destination chunks will be different and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted partially
      // into dest_chunk_1 and partially into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
      if (dest_chunk_1 != dest_chunk_2) {
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else {
        // Destination chunks are the same; adjust destination_count.
        destination_count -= 1;
        if (chunk_offset(dest_addr) == 0) {
          // Data from cur_chunk will be copied to the start of the
          // destination chunk.
          _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
        }
      }
#endif  // #if 0

      _chunk_data[cur_chunk].set_destination_count(destination_count);
      _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
      dest_addr += words;
    }

    ++cur_chunk;
  }

  *target_next = dest_addr;
  return true;
}
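
// Worked example (added commentary; not part of the original changeset):
// suppose chunk 5 holds 300 live words that compact to addresses spanning
// the end of destination chunk 2 and the start of destination chunk 3.  Then
// destination_count == 2, and chunk 3's source_chunk is set to 5 because
// chunk 5 supplies the first words copied into chunk 3.  Had all 300 words
// landed inside chunk 5 itself, the count would drop to 0, marking the chunk
// as immediately claimable.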

bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
  HeapWord* block_addr = block_to_addr(block_index);
  HeapWord* block_end_addr = block_addr + BlockSize;
  size_t chunk_index = addr_to_chunk_idx(block_addr);
  HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);

  // An object that ends exactly at the end of the block still ends in the
  // block (its last word is to the left of the end).
  if ((block_addr < partial_obj_end_addr) &&
      (partial_obj_end_addr <= block_end_addr)) {
    return true;
  }

  return false;
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  HeapWord* result = NULL;
  if (UseParallelOldGCChunkPointerCalc) {
    result = chunk_calc_new_pointer(addr);
  } else {
    result = block_calc_new_pointer(addr);
  }
  return result;
}

// This method is overly complicated (expensive) to be called
// for every reference.
// Try to restructure this so that a NULL is returned if
// the object is dead.  But don't waste the cycles to explicitly check
// that it is dead since only live objects should be passed in.

HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    size of the partial object extending onto the chunk +
  //    sizes of the live objects in the Chunk that are to the left of addr
  const size_t partial_obj_size = chunk_ptr->partial_obj_size();
  HeapWord* const search_start = chunk_addr + partial_obj_size;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += partial_obj_size + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  return result;
}
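
// Worked example (added commentary; not part of the original changeset): if
// addr lies in a partly-live chunk whose destination is D, a 40-word partial
// object spills onto the chunk, and the bitmap counts 90 live words between
// the end of that partial object and addr, then the object moves to
// D + 40 + 90 = D + 130.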

HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    block offset +
  //    sizes of the live objects in the Block that are to the left of addr
  const size_t block_offset = addr_to_block_ptr(addr)->offset();
  HeapWord* const search_start = chunk_addr + block_offset;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += block_offset + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  assert(result == chunk_calc_new_pointer(addr), "Should match");
  return result;
}

klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
  klassOop updated_klass;
  if (PSParallelCompact::should_update_klass(old_klass)) {
    updated_klass = (klassOop) calc_new_pointer(old_klass);
  } else {
    updated_klass = old_klass;
  }

  return updated_klass;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_chunk_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

#ifdef NOT_PRODUCT
ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  return sd.chunk(chunk_index);
}
#endif

elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);

void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }

void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MemRegion mr = heap->reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    &_is_alive_closure,
    ParallelGCThreads,
    ParallelRefProcEnabled);
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate bit map for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate tables for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = gc_heap();
  PSYoungGen* young_gen = heap->young_gen();
  MutableSpace* perm_space = heap->perm_gen()->object_space();

  _space_info[perm_space_id].set_space(perm_space);
  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());

  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
                  _space_info[perm_space_id].min_dense_prefix());
  }
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}

// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
public:
  PreGCValues() { }
  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }

  void fill(ParallelScavengeHeap* heap) {
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
  };

  size_t heap_used() const      { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const   { return _old_gen_used; }
  size_t perm_gen_used() const  { return _perm_gen_used; }

private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _perm_gen_used;
};

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that
  // will be set at the end of GC.  The marking bitmap is cleared to top;
  // nothing should be marked above top.  The summary data is cleared to the
  // larger of top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
  const size_t end_chunk =
    _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
  _summary_data.clear_range(beg_chunk, end_chunk);
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of
  // minor collections will have swapped the spaces an unknown number of
  // times.
  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
  ParallelScavengeHeap* heap = gc_heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  ParCompactionManager::reset();
  NOT_PRODUCT(_mark_bitmap.reset_counters());
  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
    heap->perm_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

  // Clear the marking bitmap and summary data and update top() in each space.
  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
    clear_data_covering_space(SpaceId(id));
    _space_info[id].space()->set_top(_space_info[id].new_top());
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = gc_heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(),
                                            heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();
    MemRegion perm_mr = heap->perm_gen()->reserved();
    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

    if (young_gen_empty) {
      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
    }
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  ref_processor()->enqueue_discovered_references(NULL);

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t chunk_size = ParallelCompactData::ChunkSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
  const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);

  // Skip full chunks at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const ChunkData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.chunk_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used, space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.chunk_to_addr(cp);
  const ChunkData* full_cp = cp;
  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* chunk_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
                    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
                    sd.chunk(cp), chunk_destination,
                    dense_prefix, cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the chunk that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of chunks,
      // so iterate backwards over those sparse chunks, looking for the chunk
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.chunk(cp) * chunk_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
        const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
        double prev_chunk_density_to_right =
          double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
        if (density_to_right <= prev_chunk_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
                        prev_chunk_density_to_right);
        }
        dense_prefix -= chunk_size;
        live_to_right = prev_chunk_live_to_right;
        space_to_right = prev_chunk_space_to_right;
        density_to_right = prev_chunk_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += chunk_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
  ChunkData* const cp = summary_data().chunk(chunk_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, addr, chunk_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a
// limit, which is then adjusted so the return value is min_percent when the
// density is 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed
  // value normal_distribution(1.0)); this yields a value of 0 when the
  // density is 1.  Then add the minimum value, so the minimum is returned
  // when the density is 1.  Finally, prevent negative values, which occur
  // when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}
|
1246 |
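
// Worked example (values taken from the table above): with
// ParallelOldDeadWoodLimiterStdDev == 80 and min_percent == 1, a half-live
// generation (density 0.5) gets a limit of ~0.0985, i.e., roughly 9.85% of
// capacity may remain as dead wood; at density 1.0 the raw limit equals
// _dwl_adjustment, so only the 1% minimum is returned.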

ParallelCompactData::ChunkData*
PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
                                          const ChunkData* end)
{
  const size_t chunk_size = ParallelCompactData::ChunkSize;
  ParallelCompactData& sd = summary_data();
  size_t left = sd.chunk(beg);
  size_t right = end > beg ? sd.chunk(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    ChunkData* const middle_ptr = sd.chunk(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.chunk_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    if (middle > left && dest < addr) {
      right = middle - 1;
    } else if (middle < right && middle_ptr->data_size() == chunk_size) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.chunk(left);
}
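
// The search above relies on two facts recorded by the initial
// summarization: a chunk with no dead space to its left has
// destination() == chunk_to_addr(), and a completely full chunk has
// data_size() == ChunkSize.  The first branch moves left past chunks that
// already have dead space to their left; the second moves right past
// completely full chunks; otherwise the middle chunk is the first with dead
// space.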

ParallelCompactData::ChunkData*
PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
                                         const ChunkData* end,
                                         size_t dead_words)
{
  ParallelCompactData& sd = summary_data();
  size_t left = sd.chunk(beg);
  size_t right = end > beg ? sd.chunk(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    ChunkData* const middle_ptr = sd.chunk(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.chunk_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    const size_t dead_to_left = pointer_delta(addr, dest);
    if (middle > left && dead_to_left > dead_words) {
      right = middle - 1;
    } else if (middle < right && dead_to_left < dead_words) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.chunk(left);
}
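
// This search has the same shape as first_dead_space_chunk(), but keys on
// the amount of dead space to the left (pointer_delta(addr, dest)) instead
// of its mere presence; it returns the chunk whose dead-to-left total most
// nearly equals dead_words.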

// The result is valid during the summary phase, after the initial
// summarization of each space into itself, and before final summarization.
inline double
PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
                                   HeapWord* const bottom,
                                   HeapWord* const top,
                                   HeapWord* const new_top)
{
  ParallelCompactData& sd = summary_data();

  assert(cp != NULL, "sanity");
  assert(bottom != NULL, "sanity");
  assert(top != NULL, "sanity");
  assert(new_top != NULL, "sanity");
  assert(top >= new_top, "summary data problem?");
  assert(new_top > bottom, "space is empty; should not be here");
  assert(new_top >= cp->destination(), "sanity");
  assert(top >= sd.chunk_to_addr(cp), "sanity");

  HeapWord* const destination = cp->destination();
  const size_t dense_prefix_live = pointer_delta(destination, bottom);
  const size_t compacted_region_live = pointer_delta(new_top, destination);
  const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
  const size_t reclaimable = compacted_region_used - compacted_region_live;

  const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
  return double(reclaimable) / divisor;
}
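
// The 1.25 weighting of compacted_region_live is not explained here;
// presumably it reflects that live words in the compacted region must be
// copied, while live words in the dense prefix are only updated in place,
// so copied words are treated as roughly 25% more expensive when comparing
// candidate prefixes.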

// Return the address of the end of the dense prefix, a.k.a. the start of the
// compacted region.  The address is always on a chunk boundary.
//
// Completely full chunks at the left are skipped, since no compaction can
// occur in those chunks.  Then the maximum amount of dead wood to allow is
// computed, based on the density (amount live / capacity) of the generation;
// the chunk with approximately that amount of dead space to the left is
// identified as the limit chunk.  Chunks between the last completely full
// chunk and the limit chunk are scanned and the one that has the best
// (maximum) reclaimed_ratio() is selected.
HeapWord*
PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction)
{
  const size_t chunk_size = ParallelCompactData::ChunkSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top = space->top();
  HeapWord* const top_aligned_up = sd.chunk_align_up(top);
  HeapWord* const new_top = _space_info[id].new_top();
  HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
  HeapWord* const bottom = space->bottom();
  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
  const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);

  // Skip full chunks at the beginning of the space--they are necessarily part
  // of the dense prefix.
  const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
  assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
         space->is_empty(), "no dead space allowed to the left");
  assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
         "chunk must have dead space");

  // The gc number is saved whenever a maximum compaction is done, and used to
  // determine when the maximum compaction interval has expired.  This avoids
  // successive max compactions for different reasons.
  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
    total_invocations() == HeapFirstMaximumCompactionCount;
  if (maximum_compaction || full_cp == top_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.chunk_to_addr(full_cp);
  }

  const size_t space_live = pointer_delta(new_top, bottom);
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double density = double(space_live) / double(space_capacity);
  const size_t min_percent_free =
    id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
  const double limiter = dead_wood_limiter(density, min_percent_free);
  const size_t dead_wood_max = space_used - space_live;
  const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
                                      dead_wood_max);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
    tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
                  "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
                  density, min_percent_free, limiter,
                  dead_wood_max, dead_wood_limit);
  }

  // Locate the chunk with the desired amount of dead space to the left.
  const ChunkData* const limit_cp =
    dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);

  // Scan from the first chunk with dead space to the limit chunk and find the
  // one with the best (largest) reclaimed ratio.
  double best_ratio = 0.0;
  const ChunkData* best_cp = full_cp;
  for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
    double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
    if (tmp_ratio > best_ratio) {
      best_cp = cp;
      best_ratio = tmp_ratio;
    }
  }

#if 0
  // Something to consider:  if the chunk with the best ratio is 'close to' the
  // first chunk w/free space, choose the first chunk with free space
  // ("first-free").  The first-free chunk is usually near the start of the
  // heap, which means we are copying most of the heap already, so copy a bit
  // more to get complete compaction.
  if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
    _maximum_compaction_gc_num = total_invocations();
    best_cp = full_cp;
  }
#endif  // #if 0

  return sd.chunk_to_addr(best_cp);
}
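
// To summarize the selection above: a maximum compaction -- requested
// explicitly, forced because no chunk below the top has dead space, or
// triggered by the compaction interval -- ends the dense prefix at the
// first chunk with dead space; otherwise the prefix ends at the chunk with
// the best reclaimed_ratio() between full_cp and the dead-wood limit chunk.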

void PSParallelCompact::summarize_spaces_quick()
{
  for (unsigned int i = 0; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
    bool result = _summary_data.summarize(space->bottom(), space->end(),
                                          space->bottom(), space->top(),
                                          _space_info[i].new_top_addr());
    assert(result, "should never fail");
    _space_info[i].set_dense_prefix(space->bottom());
  }
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
{
  HeapWord* const dense_prefix_end = dense_prefix(id);
  const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
  const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
  if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
    // Only enough dead space is filled so that any remaining dead space to the
    // left is larger than the minimum filler object.  (The remainder is filled
    // during the copy/update phase.)
    //
    // The size of the dead space to the right of the boundary is not a
    // concern, since compaction will be able to use whatever space is
    // available.
    //
    // Here '||' is the boundary, 'x' represents a don't care bit and a box
    // surrounds the space to be filled with an object.
    //
    // In the 32-bit VM, each bit represents two 32-bit words:
    //                              +---+
    // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
    //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
    //                              +---+
    //
    // In the 64-bit VM, each bit represents one 64-bit word:
    //                              +------------+
    // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
    //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
    //                              +------------+
    //                          +-------+
    // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
    //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
    //                          +-------+
    //                      +-----------+
    // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
    //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
    //                      +-----------+
    //                          +-------+
    // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
    //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
    //                          +-------+

    // Initially assume case a, c or e will apply.
    size_t obj_len = (size_t)oopDesc::header_size();
    HeapWord* obj_beg = dense_prefix_end - obj_len;

#ifdef _LP64
    if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
      // Case b above.
      obj_beg = dense_prefix_end - 1;
    } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
               _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
      // Case d above.
      obj_beg = dense_prefix_end - 3;
      obj_len = 3;
    }
#endif  // #ifdef _LP64

    MemRegion region(obj_beg, obj_len);
    SharedHeap::fill_region_with_object(region);
    _mark_bitmap.mark_obj(obj_beg, obj_len);
    _summary_data.add_obj(obj_beg, obj_len);
    assert(start_array(id) != NULL, "sanity");
    start_array(id)->allocate_block(obj_beg);
  }
}
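
// For illustration: case d above is a 3-word hole ending exactly at the
// dense prefix boundary on a 64-bit VM, so the filler is placed at
// dense_prefix_end - 3 with obj_len == 3; no dead fragment smaller than the
// minimum filler object is left to the boundary's left.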

void
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{
  assert(id < last_space_id, "id out of range");

  const MutableSpace* space = _space_info[id].space();
  HeapWord** new_top_addr = _space_info[id].new_top_addr();

  HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
  _space_info[id].set_dense_prefix(dense_prefix_end);

#ifndef PRODUCT
  if (TraceParallelOldGCDensePrefix) {
    print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
    HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
    print_dense_prefix_stats("density", id, maximum_compaction, addr);
  }
#endif  // #ifndef PRODUCT

  // If dead space crosses the dense prefix boundary, it is (at least
  // partially) filled with a dummy object, marked live and added to the
  // summary data.  This simplifies the copy/update phase and must be done
  // before the final locations of objects are determined, to prevent leaving
  // a fragment of dead space that is too small to fill with an object.
  if (!maximum_compaction && dense_prefix_end != space->bottom()) {
    fill_dense_prefix_end(id);
  }

  // Compute the destination of each Chunk, and thus each object.
  _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
  _summary_data.summarize(dense_prefix_end, space->end(),
                          dense_prefix_end, space->top(),
                          new_top_addr);

  if (TraceParallelOldGCSummaryPhase) {
    const size_t chunk_size = ParallelCompactData::ChunkSize;
    const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
    const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
    const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
    const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
    tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
                  "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
                  "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
                  id, space->capacity_in_words(), dense_prefix_end,
                  dp_chunk, dp_words / chunk_size,
                  cr_words / chunk_size, *new_top_addr);
  }
}

void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                      bool maximum_compaction)
{
  EventMark m("2 summarize");
  TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
  // trace("2");

#ifdef ASSERT
  if (VerifyParallelOldWithMarkSweep &&
      (PSParallelCompact::total_invocations() %
       VerifyParallelOldWithMarkSweepInterval) == 0) {
    verify_mark_bitmap(_mark_bitmap);
  }
  if (TraceParallelOldGCMarkingPhase) {
    tty->print_cr("add_obj_count=" SIZE_FORMAT " "
                  "add_obj_bytes=" SIZE_FORMAT,
                  add_obj_count, add_obj_size * HeapWordSize);
    tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
                  "mark_bitmap_bytes=" SIZE_FORMAT,
                  mark_bitmap_count, mark_bitmap_size * HeapWordSize);
  }
#endif  // #ifdef ASSERT

  // Quick summarization of each space into itself, to see how much is live.
  summarize_spaces_quick();

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("summary_phase:  after summarizing each space to self");
    Universe::print();
    NOT_PRODUCT(print_chunk_ranges());
    if (Verbose) {
      NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
    }
  }

  // The amount of live data that will end up in old space (assuming it fits).
  size_t old_space_total_live = 0;
  unsigned int id;
  for (id = old_space_id; id < last_space_id; ++id) {
    old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                          _space_info[id].space()->bottom());
  }

  const MutableSpace* old_space = _space_info[old_space_id].space();
  if (old_space_total_live > old_space->capacity_in_words()) {
    // XXX - should also try to expand
    maximum_compaction = true;
  } else if (!UseParallelOldGCDensePrefix) {
    maximum_compaction = true;
  }

  // Permanent and Old generations.
  summarize_space(perm_space_id, maximum_compaction);
  summarize_space(old_space_id, maximum_compaction);

  // Summarize the remaining spaces (those in the young gen) into old space.
  // If the live data from a space doesn't fit, the existing summarization is
  // left intact, so the data is compacted down within the space itself.
  HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
  HeapWord* const target_space_end = old_space->end();
  for (id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(target_space_end, *new_top_addr);
    if (live <= available) {
      // All the live data will fit.
      if (TraceParallelOldGCSummaryPhase) {
        tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
                      id, *new_top_addr);
      }
      _summary_data.summarize(*new_top_addr, target_space_end,
                              space->bottom(), space->top(),
                              new_top_addr);

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());

      // Clear the source_chunk field for each chunk in the space.
      ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
      ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
      while (beg_chunk <= end_chunk) {
        beg_chunk->set_source_chunk(0);
        ++beg_chunk;
      }
    }
  }

  // Fill in the block data after any changes to the chunks have
  // been made.
#ifdef ASSERT
  summarize_blocks(cm, perm_space_id);
  summarize_blocks(cm, old_space_id);
#else
  if (!UseParallelOldGCChunkPointerCalc) {
    summarize_blocks(cm, perm_space_id);
    summarize_blocks(cm, old_space_id);
  }
#endif

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("summary_phase:  after final summarization");
    Universe::print();
    NOT_PRODUCT(print_chunk_ranges());
    if (Verbose) {
      NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
    }
  }
}
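
// To summarize the flow above: each space is first summarized into itself
// to measure live data; perm and old are then summarized for real; and each
// young-gen space is summarized into old space only if its live data fits
// in the remaining room -- otherwise its self-summarization stands and it
// compacts within itself.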

// Fill in the BlockData.
// Iterate over the spaces and, within each space, iterate over
// the chunks and fill in the BlockData for each chunk.

void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
                                         SpaceId first_compaction_space_id) {
#if 0
  DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
  for (SpaceId cur_space_id = first_compaction_space_id;
       cur_space_id != last_space_id;
       cur_space_id = next_compaction_space_id(cur_space_id)) {
    // Iterate over the chunks in the space
    size_t start_chunk_index =
      _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
    BitBlockUpdateClosure bbu(mark_bitmap(),
                              cm,
                              start_chunk_index);
    // Iterate over blocks.
    for (size_t chunk_index = start_chunk_index;
         chunk_index < _summary_data.chunk_count() &&
         _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
         chunk_index++) {

      // Reset the closure for the new chunk.  Note that the closure
      // maintains some data that does not get reset for each chunk
      // so a new instance of the closure is not appropriate.
      bbu.reset_chunk(chunk_index);

      // Start the iteration with the first live object.  This
      // may return the end of the chunk.  That is acceptable since
      // it will properly limit the iterations.
      ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
        _summary_data.first_live_or_end_in_chunk(chunk_index));

      // End the iteration at the end of the chunk.
      HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
      HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
      ParMarkBitMap::idx_t right_offset =
        mark_bitmap()->addr_to_bit(chunk_end);

      // Blocks that have no objects starting in them can be
      // skipped because their data will never be used.
      if (left_offset < right_offset) {

        // Iterate through the objects in the chunk.
        ParMarkBitMap::idx_t last_offset =
          mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);

        // If last_offset is less than right_offset, then the iterations
        // terminated while looking for an end bit.  "last_offset" is then
        // the offset for the last start bit.  In this situation the
        // "offset" field for the next block to the right (_cur_block + 1)
        // will not have been updated, although there may be live data
        // to the left of the chunk.

        size_t cur_block_plus_1 = bbu.cur_block() + 1;
        HeapWord* cur_block_plus_1_addr =
          _summary_data.block_to_addr(bbu.cur_block()) +
          ParallelCompactData::BlockSize;
        HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
#if 1  // This code works.  The else doesn't but should.  Why does it?
        // The current block (cur_block()) has already been updated.
        // The last block that may need to be updated is either the
        // next block (current block + 1) or the block where the
        // last object starts (which can be greater than the
        // next block if there were no objects found in intervening
        // blocks).
        size_t last_block =
          MAX2(bbu.cur_block() + 1,
               _summary_data.addr_to_block_idx(last_offset_addr));
#else
        // The current block has already been updated.  The only block
        // that remains to be updated is the block where the last
        // object in the chunk starts.
        size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
#endif
        assert_bit_is_start(last_offset);
        assert((last_block == _summary_data.block_count()) ||
               (_summary_data.block(last_block)->raw_offset() == 0),
               "Should not have been set");
        // Is the last block still in the current chunk?  If still
        // in this chunk, update the last block (the counting that
        // included the current block is meant for the offset of the last
        // block).  If not in this chunk, do nothing.  Should not
        // update a block in the next chunk.
        if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
                                                      last_block)) {
          if (last_offset < right_offset) {
            // The last object started in this chunk but ends beyond
            // this chunk.  Update the block for this last object.
            assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
            // No end bit was found.  The closure takes care of
            // the cases where
            //   an object crosses over into the next block
            //   an object starts and ends in the next block
            // It does not handle the case where an object is
            // the first object in a later block and extends
            // past the end of the chunk (i.e., the closure
            // only handles complete objects that are in the range
            // it is given).  That object is handed back here
            // for any special consideration necessary.
            //
            // Is the first bit in the last block a start or end bit?
            //
            // If the partial object ends in the last block L,
            // then the 1st bit in L may be an end bit.
            //
            // Else does the last object start in a block after the current
            // block?  A block AA will already have been updated if an
            // object ends in the next block AA+1.  An object found to end
            // in AA+1 is the trigger that updates AA.  Objects are being
            // counted in the current block for updating a following
            // block.  An object may start in a later block but extend
            // beyond the last block in the chunk.  Updates are only done
            // when the end of an object has been found.  If the last
            // object (covered by block L) starts beyond the current block,
            // then no object ends in L (otherwise L would be the current
            // block).  So the first bit in L is a start bit.
            //
            // Else the last object starts in the current block and extends
            // beyond the chunk.  The current block has already been
            // updated and there is no later block (with an object
            // starting in it) that needs to be updated.
            //
            if (_summary_data.partial_obj_ends_in_block(last_block)) {
              _summary_data.block(last_block)->set_end_bit_offset(
                bbu.live_data_left());
            } else if (last_offset_addr >= cur_block_plus_1_addr) {
              // The start of the object is in a later block
              // (to the right of the current block) and there are no
              // complete live objects to the left of this last object
              // within the chunk.
              // The first bit in the block is for the start of the
              // last object.
              _summary_data.block(last_block)->set_start_bit_offset(
                bbu.live_data_left());
            } else {
              // The start of the last object was found in
              // the current chunk (which has already
              // been updated).
              assert(bbu.cur_block() ==
                     _summary_data.addr_to_block_idx(last_offset_addr),
                     "Should be a block already processed");
            }
#ifdef ASSERT
            // Is there enough block information to find this object?
            // The destination of the chunk has not been set so the
            // values returned by calc_new_pointer() and
            // block_calc_new_pointer() will only be
            // offsets.  But they should agree.
            HeapWord* moved_obj_with_chunks =
              _summary_data.chunk_calc_new_pointer(last_offset_addr);
            HeapWord* moved_obj_with_blocks =
              _summary_data.calc_new_pointer(last_offset_addr);
            assert(moved_obj_with_chunks == moved_obj_with_blocks,
                   "Block calculation is wrong");
#endif
          } else if (last_block < _summary_data.block_count()) {
            // Iterations ended looking for a start bit (but
            // did not run off the end of the block table).
            _summary_data.block(last_block)->set_start_bit_offset(
              bbu.live_data_left());
          }
        }
#ifdef ASSERT
        // Is there enough block information to find this object?
        HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
        HeapWord* moved_obj_with_chunks =
          _summary_data.chunk_calc_new_pointer(left_offset_addr);
        HeapWord* moved_obj_with_blocks =
          _summary_data.calc_new_pointer(left_offset_addr);
        assert(moved_obj_with_chunks == moved_obj_with_blocks,
               "Block calculation is wrong");
#endif

        // Is there another block after the end of this chunk?
#ifdef ASSERT
        if (last_block < _summary_data.block_count()) {
          // No object may have been found in a block.  If that
          // block is at the end of the chunk, the iteration will
          // terminate without incrementing the current block so
          // that the current block is not the last block in the
          // chunk.  That situation precludes asserting that the
          // current block is the last block in the chunk.  Assert
          // the lesser condition that the current block does not
          // exceed the chunk.
          assert(_summary_data.block_to_addr(last_block) <=
                 (_summary_data.chunk_to_addr(chunk_index) +
                  ParallelCompactData::ChunkSize),
                 "Chunk and block inconsistency");
          assert(last_offset <= right_offset, "Iteration over ran end");
        }
#endif
      }
#ifdef ASSERT
      if (PrintGCDetails && Verbose) {
        if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
          size_t first_block =
            chunk_index / ParallelCompactData::BlocksPerChunk;
          gclog_or_tty->print_cr("first_block " PTR_FORMAT
                                 " _offset " PTR_FORMAT
                                 " _first_is_start_bit %d",
                                 first_block,
                                 _summary_data.block(first_block)->raw_offset(),
                                 _summary_data.block(first_block)->first_is_start_bit());
        }
      }
#endif
    }
  }
  DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
#endif  // #if 0
}

// This method should contain all heap-specific policy for invoking a full
// collection.  invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further.  If we need to bail out for policy reasons,
// scavenge before full gc, or do any other specialized behavior, it needs to
// be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");
  ParallelScavengeHeap* heap = gc_heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(!heap->is_gc_active(), "not reentrant");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long.  If so,
  // bail out without attempting a collection.  The exceptions are
  // for explicitly requested GCs.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
  }
}

bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
  size_t addr_chunk_index = addr_to_chunk_idx(addr);
  return chunk_index == addr_chunk_index;
}

bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
                                               size_t block_index) {
  size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
  size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;

  return (first_block_in_chunk <= block_index) &&
         (block_index <= last_block_in_chunk);
}
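
// For illustration: blocks are numbered consecutively within chunks, so if
// BlocksPerChunk is 4, chunk 2 contains exactly blocks 8 through 11;
// chunk_contains_block(2, 8) and chunk_contains_block(2, 11) are true while
// chunk_contains_block(2, 12) is false.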

// This method contains no policy.  You should probably
// be calling invoke() instead.
void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::is_active()) {
    return;
  }

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  // "serial_CM" is needed until the parallel implementation
  // of the move and update is done.
  ParCompactionManager* serial_CM = new ParCompactionManager();
  // Don't initialize more than once.
  // serial_CM->initialize(&summary_data(), mark_bitmap());

  ParallelScavengeHeap* heap = gc_heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  PreGCValues pre_gc_values;
  pre_compact(&pre_gc_values);

  // Place after pre_compact() where the number of invocations is incremented.
  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  {
    ResourceMark rm;
    HandleMark hm;

    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;

    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();

    bool marked_for_unloading = false;

    marking_start.update();
    marking_phase(serial_CM, maximum_heap_compaction);

#ifndef PRODUCT
    if (TraceParallelOldGCMarkingPhase) {
      gclog_or_tty->print_cr("marking_phase: cas_tries %d  cas_retries %d "
                             "cas_by_another %d",
                             mark_bitmap()->cas_tries(),
                             mark_bitmap()->cas_retries(),
                             mark_bitmap()->cas_by_another());
    }
#endif  // #ifndef PRODUCT

#ifdef ASSERT
    if (VerifyParallelOldWithMarkSweep &&
        (PSParallelCompact::total_invocations() %
         VerifyParallelOldWithMarkSweepInterval) == 0) {
      gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("mark_sweep_phase1:");
      }
      // Clear the discovered lists so that discovered objects
      // don't look like they have been discovered twice.
      ref_processor()->clear_discovered_references();

      PSMarkSweep::allocate_stacks();
      MemRegion mr = Universe::heap()->reserved_region();
      PSMarkSweep::ref_processor()->enable_discovery();
      PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction);
    }
#endif

    bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
    summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);

#ifdef ASSERT
    if (VerifyParallelOldWithMarkSweep &&
        (PSParallelCompact::total_invocations() %
         VerifyParallelOldWithMarkSweepInterval) == 0) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("mark_sweep_phase2:");
      }
      PSMarkSweep::mark_sweep_phase2();
    }
#endif

    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    // adjust_roots() updates Universe::_intArrayKlassObj which is
    // needed by the compaction for filling holes in the dense prefix.
    adjust_roots();

#ifdef ASSERT
    if (VerifyParallelOldWithMarkSweep &&
        (PSParallelCompact::total_invocations() %
         VerifyParallelOldWithMarkSweepInterval) == 0) {
      // Do a separate verify phase so that the verify
      // code can use the forwarding pointers to
      // check the new pointer calculation.  The restore_marks()
      // has to be done before the real compact.
      serial_CM->set_action(ParCompactionManager::VerifyUpdate);
      compact_perm(serial_CM);
      compact_serial(serial_CM);
      serial_CM->set_action(ParCompactionManager::ResetObjects);
      compact_perm(serial_CM);
      compact_serial(serial_CM);
      serial_CM->set_action(ParCompactionManager::UpdateAndCopy);

      // For debugging only
      PSMarkSweep::restore_marks();
      PSMarkSweep::deallocate_stacks();
    }
#endif

    compaction_start.update();
    // Does the perm gen always have to be done serially because
    // klasses are used in the update of an object?
    compact_perm(serial_CM);

    if (UseParallelOldGCCompacting) {
      compact();
    } else {
      compact_serial(serial_CM);
    }

    delete serial_CM;

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                              " perm_gen_capacity: %d ",
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(
          young_gen->used_in_bytes(),
          young_gen->eden_space()->used_in_bytes(),
          old_gen->used_in_bytes(),
          perm_gen->used_in_bytes(),
          young_gen->eden_space()->capacity_in_bytes(),
          old_gen->max_gen_size(),
          max_eden_size,
          true /* full gc*/,
          gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(pre_gc_values.perm_gen_used());

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // No GC timestamp here.  This is after GC so it would be confusing.
        young_gen->print_used_change(pre_gc_values.young_gen_used());
        old_gen->print_used_change(pre_gc_values.old_gen_used());
        heap->print_heap_change(pre_gc_values.heap_used());
        // Print perm gen last (print_heap_change() excludes the perm gen).
        perm_gen->print_used_change(pre_gc_values.perm_gen_used());
      } else {
        heap->print_heap_change(pre_gc_values.heap_used());
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
                                 "of %d%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
                                 "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  collection_exit.update();

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (PrintGCTaskTimeStamps) {
    gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
                           INT64_FORMAT,
                           marking_start.ticks(), compaction_start.ticks(),
                           collection_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }
}

bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                   PSYoungGen* young_gen,
                                                   PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount
  // promoted in the total; otherwise the next young gen GC will simply bail
  // out to a full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  MemRegion old_gen_unused(old_space->top(), old_space->end());
  if (!old_gen_unused.is_empty()) {
    SharedHeap::fill_region_with_object(old_gen_unused);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the
  // region from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from
  // eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  HeapWord* const start = old_gen_unused.start();
  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
    start_array->allocate_block(addr);
  }

  // Could update the promoted average here, but it is not typically updated
  // at full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
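
// Worked example (hypothetical numbers): with eden_used == 30M, a padded
// average promotion of 3M and a 64K alignment, absorb_size is
// align_size_up(33M, 64K) == 33M; the old gen then expands into the young
// gen by 33M, provided eden retains some capacity and the shrunken young
// gen stays at or above its minimum size.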

GCTaskManager* const PSParallelCompact::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                      bool maximum_heap_compaction) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);

  ParallelScavengeHeap* heap = gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
  ParallelTaskTerminator terminator(parallel_gc_threads, qset);

  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);

  {
    TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);

    GCTaskQueue* q = GCTaskQueue::create();

    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
    // We scan the thread roots in parallel
    Threads::create_thread_roots_marking_tasks(q);
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));

    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < parallel_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }

    WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
    q->enqueue(fin);

    gc_task_manager()->add_list(q);

    fin->wait_for();

    // We have to release the barrier tasks!
    WaitForBarrierGCTask::destroy(fin);
  }

  // Process reference objects found during marking
  {
    TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
    ReferencePolicy *soft_ref_policy;
    if (maximum_heap_compaction) {
      soft_ref_policy = new AlwaysClearPolicy();
    } else {
#ifdef COMPILER2
      soft_ref_policy = new LRUMaxHeapPolicy();
#else
      soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2
    }
    assert(soft_ref_policy != NULL, "No soft reference policy");
    if (ref_processor()->processing_is_mt()) {
      RefProcTaskExecutor task_executor;
      ref_processor()->process_discovered_references(
        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
        &follow_stack_closure, &task_executor);
    } else {
      ref_processor()->process_discovered_references(
        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
        &follow_stack_closure, NULL);
    }
  }

  TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
  // Follow system dictionary roots and unload classes.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots.
  CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                          purged_class);
  follow_stack(cm); // Flush marking stack.

  // Update subklass/sibling/implementor links of live klasses
  // revisit_klass_stack is used in follow_weak_klass_links().
  follow_weak_klass_links(cm);

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
  assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
}
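
// The fan-out above enqueues one task per root group plus one thread-roots
// task per mutator thread; with more than one GC thread, a StealMarkingTask
// per worker lets idle threads take marking work from busy ones, and the
// WaitForBarrierGCTask is the join point before reference processing.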
||
2414 |
// This should be moved to the shared markSweep code! |
|
2415 |
class PSAlwaysTrueClosure: public BoolObjectClosure { |
|
2416 |
public: |
|
2417 |
void do_object(oop p) { ShouldNotReachHere(); } |
|
2418 |
bool do_object_b(oop p) { return true; } |
|
2419 |
}; |
|
2420 |
static PSAlwaysTrueClosure always_true; |
|
void PSParallelCompact::adjust_roots() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust roots");
  TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure());
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  // Roots were visited so references into the young gen in roots
  // may have been scanned.  Process them also.
  // Should the reference processor have a span that excludes
  // young gen objects?
  PSScavenge::reference_processor()->weak_oops_do(
    adjust_root_pointer_closure());
}
|
void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
  EventMark m("4 compact perm");
  TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
  // trace("4");

  gc_heap()->perm_gen()->start_array()->reset();
  move_and_update(cm, perm_space_id);
}

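// Set up the tasks that copy live data during the compaction phase: one
// stack-draining task per worker, with the chunks that can be filled
// immediately claimed up front and dealt out round-robin to the workers.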
void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
  TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);

  const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
  for (unsigned int j = 0; j < task_count; j++) {
    q->enqueue(new DrainStacksCompactionTask());
  }

  // Find all chunks that are available (can be filled immediately) and
  // distribute them to the thread stacks.  The iteration is done in reverse
  // order (high to low) so the chunks will be removed in ascending order.

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  size_t fillable_chunks = 0;   // A count for diagnostic purposes.
  unsigned int which = 0;       // The worker thread number.

  for (unsigned int id = to_space_id; id > perm_space_id; --id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();
    HeapWord* const new_top = space_info->new_top();

    const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
    const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
    assert(end_chunk > 0, "perm gen cannot be empty");

    for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
      if (sd.chunk(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::manager_array(which);
        cm->save_for_processing(cur);

        if (TraceParallelOldGCCompactionPhase && Verbose) {
          const size_t count_mod_8 = fillable_chunks & 7;
          if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
          gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
          if (count_mod_8 == 7) gclog_or_tty->cr();
        }

        NOT_PRODUCT(++fillable_chunks;)

        // Assign chunks to threads in round-robin fashion.
        if (++which == task_count) {
          which = 0;
        }
      }
    }
  }

  if (TraceParallelOldGCCompactionPhase) {
    if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
    gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable chunks",
                           fillable_chunks);
  }
}
|
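// Over-partitioning factor for dense prefix updates: each worker is given
// several smaller tasks rather than one large one, which helps balance the
// load when the amount of update work varies from chunk to chunk.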
#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4

void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                   uint parallel_gc_threads) {
  TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);

  ParallelCompactData& sd = PSParallelCompact::summary_data();

  // Iterate over all the spaces adding tasks for updating
  // chunks in the dense prefix.  Assume that 1 gc thread
  // will work on opening the gaps and the remaining gc threads
  // will work on the dense prefix.
  SpaceId space_id = old_space_id;
  while (space_id != last_space_id) {
    HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
    const MutableSpace* const space = _space_info[space_id].space();

    if (dense_prefix_end == space->bottom()) {
      // There is no dense prefix for this space.
      space_id = next_compaction_space_id(space_id);
      continue;
    }

    // The dense prefix is before this chunk.
    size_t chunk_index_end_dense_prefix =
        sd.addr_to_chunk_idx(dense_prefix_end);
    ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
    assert(dense_prefix_end == space->end() ||
           dense_prefix_cp->available() ||
           dense_prefix_cp->claimed(),
           "The chunk after the dense prefix should always be ready to fill");

    size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());

    // Is there dense prefix work?
    size_t total_dense_prefix_chunks =
      chunk_index_end_dense_prefix - chunk_index_start;
    // How many chunks of the dense prefix should be given to
    // each thread?
    if (total_dense_prefix_chunks > 0) {
      uint tasks_for_dense_prefix = 1;
      if (UseParallelDensePrefixUpdate) {
        if (total_dense_prefix_chunks <=
            (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
          // Don't over partition.  This assumes that
          // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
          // so there are not many chunks to process.
          tasks_for_dense_prefix = parallel_gc_threads;
        } else {
          // Over partition
          tasks_for_dense_prefix = parallel_gc_threads *
            PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
        }
      }
      size_t chunks_per_thread = total_dense_prefix_chunks /
        tasks_for_dense_prefix;
      // Give each thread at least 1 chunk.
      if (chunks_per_thread == 0) {
        chunks_per_thread = 1;
      }

      for (uint k = 0; k < tasks_for_dense_prefix; k++) {
        if (chunk_index_start >= chunk_index_end_dense_prefix) {
          break;
        }
        // chunk_index_end is not processed
        size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
                                      chunk_index_end_dense_prefix);
        q->enqueue(new UpdateDensePrefixTask(space_id,
                                             chunk_index_start,
                                             chunk_index_end));
        chunk_index_start = chunk_index_end;
      }
    }
    // This gets any part of the dense prefix that did not
    // fit evenly.
    if (chunk_index_start < chunk_index_end_dense_prefix) {
      q->enqueue(new UpdateDensePrefixTask(space_id,
                                           chunk_index_start,
                                           chunk_index_end_dense_prefix));
    }
    space_id = next_compaction_space_id(space_id);
  }  // End tasks for dense prefix
}
|
void PSParallelCompact::enqueue_chunk_stealing_tasks(
                                     GCTaskQueue* q,
                                     ParallelTaskTerminator* terminator_ptr,
                                     uint parallel_gc_threads) {
  TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);

  // Once a thread has drained its stack, it should try to steal chunks from
  // other threads.
  if (parallel_gc_threads > 1) {
    for (uint j = 0; j < parallel_gc_threads; j++) {
      q->enqueue(new StealChunkCompactionTask(terminator_ptr));
    }
  }
}
|
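// The compaction phase proper: draining tasks copy live data to its new
// location, dense prefix tasks update pointers in the part of the space that
// does not move, and stealing tasks keep otherwise-idle workers busy.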
void PSParallelCompact::compact() {
  EventMark m("5 compact");
  // trace("5");
  TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
  ParallelTaskTerminator terminator(parallel_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  enqueue_chunk_draining_tasks(q, parallel_gc_threads);
  enqueue_dense_prefix_tasks(q, parallel_gc_threads);
  enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);

  {
    TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);

    WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
    q->enqueue(fin);

    gc_task_manager()->add_list(q);

    fin->wait_for();

    // We have to release the barrier tasks!
    WaitForBarrierGCTask::destroy(fin);

#ifdef ASSERT
    // Verify that all chunks have been processed before the deferred updates.
    // Note that perm_space_id is skipped; this type of verification is not
    // valid until the perm gen is compacted by chunks.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }

  {
    // Update the deferred objects, if any.  Any compaction manager can be used.
    TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
    ParCompactionManager* cm = ParCompactionManager::manager_array(0);
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      update_deferred_objects(cm, SpaceId(id));
    }
  }
}
|
#ifdef ASSERT
void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Chunks between space bottom() and new_top() should be marked as filled
  // and all Chunks between new_top() and top() should be available (i.e.,
  // should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
  HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
  const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
  const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
  const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);

  bool issued_a_warning = false;

  size_t cur_chunk;
  for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
    const ChunkData* const c = sd.chunk(cur_chunk);
    if (!c->completed()) {
      warning("chunk " SIZE_FORMAT " not filled: "
              "destination_count=" SIZE_FORMAT,
              cur_chunk, c->destination_count());
      issued_a_warning = true;
    }
  }

  for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
    const ChunkData* const c = sd.chunk(cur_chunk);
    if (!c->available()) {
      warning("chunk " SIZE_FORMAT " not empty: "
              "destination_count=" SIZE_FORMAT,
              cur_chunk, c->destination_count());
      issued_a_warning = true;
    }
  }

  if (issued_a_warning) {
    print_chunk_ranges();
  }
}
#endif  // #ifdef ASSERT
|
void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
  EventMark m("5 compact serial");
  TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->start_array()->reset();
  old_gen->move_and_update(cm);
  young_gen->move_and_update(cm);
}
|
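// Transitively follow the contents of marked objects: drain the overflow
// stack first, then the compaction manager's local marking stack.  Objects
// may push new entries onto the stacks as their contents are followed.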
void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
  while (!cm->overflow_stack()->is_empty()) {
    oop obj = cm->overflow_stack()->pop();
    obj->follow_contents(cm);
  }

  oop obj;
  // obj is a reference!!!
  while (cm->marking_stack()->pop_local(obj)) {
    // It would be nice to assert about the type of objects we might
    // pop, but they can come from anywhere, unfortunately.
    obj->follow_contents(cm);
  }
}
|
void
PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
  // All klasses on the revisit stack are marked at this point.
  // Update and follow all subklass, sibling and implementor links.
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
    KeepAliveClosure keep_alive_closure(cm);
    for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
      cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
        is_alive_closure(),
        &keep_alive_closure);
    }
    follow_stack(cm);
  }
}

void
PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
  cm->revisit_klass_stack()->push(k);
}
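
// The code from here to the matching #endif is debugging support, compiled
// in only when VALIDATE_MARK_SWEEP is defined.  It records adjusted pointers
// and live oops so that a compaction can be cross-checked after the fact.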
|
2767 |
||
2768 |
#ifdef VALIDATE_MARK_SWEEP |
|
2769 |
||
void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
  if (!ValidateMarkSweep)
    return;

  if (!isroot) {
    if (_pointer_tracking) {
      guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
      _adjusted_pointers->remove(p);
    }
  } else {
    ptrdiff_t index = _root_refs_stack->find(p);
    if (index != -1) {
      int l = _root_refs_stack->length();
      if (l > 0 && l - 1 != index) {
        void* last = _root_refs_stack->pop();
        assert(last != p, "should be different");
        _root_refs_stack->at_put(index, last);
      } else {
        _root_refs_stack->remove(p);
      }
    }
  }
}

void PSParallelCompact::check_adjust_pointer(void* p) {
  _adjusted_pointers->push(p);
}


class AdjusterTracker: public OopClosure {
public:
  AdjusterTracker() {};
  void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
  void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
};


void PSParallelCompact::track_interior_pointers(oop obj) {
  if (ValidateMarkSweep) {
    _adjusted_pointers->clear();
    _pointer_tracking = true;

    AdjusterTracker checker;
    obj->oop_iterate(&checker);
  }
}


void PSParallelCompact::check_interior_pointers() {
  if (ValidateMarkSweep) {
    _pointer_tracking = false;
    guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
  }
}


void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
  if (ValidateMarkSweep) {
    guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
    _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
  }
}


void PSParallelCompact::register_live_oop(oop p, size_t size) {
  if (ValidateMarkSweep) {
    _live_oops->push(p);
    _live_oops_size->push(size);
    _live_oops_index++;
  }
}

void PSParallelCompact::validate_live_oop(oop p, size_t size) {
  if (ValidateMarkSweep) {
    oop obj = _live_oops->at((int)_live_oops_index);
    guarantee(obj == p, "should be the same object");
    guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
    _live_oops_index++;
  }
}

void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
                                          HeapWord* compaction_top) {
  assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
         "should be moved to forwarded location");
  if (ValidateMarkSweep) {
    PSParallelCompact::validate_live_oop(oop(q), size);
    _live_oops_moved_to->push(oop(compaction_top));
  }
  if (RecordMarkSweepCompaction) {
    _cur_gc_live_oops->push(q);
    _cur_gc_live_oops_moved_to->push(compaction_top);
    _cur_gc_live_oops_size->push(size);
  }
}
|
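// Swap the "current GC" recording arrays with the "last GC" arrays so the
// data gathered during the GC that just completed can be queried (see
// print_new_location_of_heap_address()) while the next GC records anew.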
void PSParallelCompact::compaction_complete() {
  if (RecordMarkSweepCompaction) {
    GrowableArray<HeapWord*>* _tmp_live_oops          = _cur_gc_live_oops;
    GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
    GrowableArray<size_t>*    _tmp_live_oops_size     = _cur_gc_live_oops_size;

    _cur_gc_live_oops           = _last_gc_live_oops;
    _cur_gc_live_oops_moved_to  = _last_gc_live_oops_moved_to;
    _cur_gc_live_oops_size      = _last_gc_live_oops_size;
    _last_gc_live_oops          = _tmp_live_oops;
    _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
    _last_gc_live_oops_size     = _tmp_live_oops_size;
  }
}


void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
  if (!RecordMarkSweepCompaction) {
    tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
    return;
  }

  if (_last_gc_live_oops == NULL) {
    tty->print_cr("No compaction information gathered yet");
    return;
  }

  for (int i = 0; i < _last_gc_live_oops->length(); i++) {
    HeapWord* old_oop = _last_gc_live_oops->at(i);
    size_t    sz      = _last_gc_live_oops_size->at(i);
    if (old_oop <= q && q < (old_oop + sz)) {
      HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
      size_t offset = (q - old_oop);
      tty->print_cr("Address " PTR_FORMAT, q);
      tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT
                    ", at offset " SIZE_FORMAT, old_oop, sz, offset);
      tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT,
                    new_oop, new_oop + offset);
      return;
    }
  }

  tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
}
#endif // VALIDATE_MARK_SWEEP
|
// Update interior oops in the range of chunks [beg_chunk, end_chunk).
void
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                       SpaceId space_id,
                                                       size_t beg_chunk,
                                                       size_t end_chunk) {
  ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const mbm = mark_bitmap();

  HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
  HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
  assert(beg_chunk <= end_chunk, "bad chunk range");
  assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");

#ifdef ASSERT
  // Claim the chunks to avoid triggering an assert when they are marked as
  // filled.
  for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
    assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
  }
#endif  // #ifdef ASSERT

  if (beg_addr != space(space_id)->bottom()) {
    // Find the first live object or block of dead space that *starts* in this
    // range of chunks.  If a partial object crosses onto the chunk, skip it; it
    // will be marked for 'deferred update' when the object head is processed.
    // If dead space crosses onto the chunk, it is also skipped; it will be
    // filled when the prior chunk is processed.  If neither of those apply, the
    // first word in the chunk is the start of a live object or dead space.
    assert(beg_addr > space(space_id)->bottom(), "sanity");
    const ChunkData* const cp = sd.chunk(beg_chunk);
    if (cp->partial_obj_size() != 0) {
      beg_addr = sd.partial_obj_end(beg_chunk);
    } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
      beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
    }
  }

  if (beg_addr < end_addr) {
    // A live object or block of dead space starts in this range of Chunks.
    HeapWord* const dense_prefix_end = dense_prefix(space_id);

    // Create closures and iterate.
    UpdateOnlyClosure update_closure(mbm, cm, space_id);
    FillClosure fill_closure(cm, space_id);
    ParMarkBitMap::IterationStatus status;
    status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
                          dense_prefix_end);
    if (status == ParMarkBitMap::incomplete) {
      update_closure.do_addr(update_closure.source());
    }
  }

  // Mark the chunks as filled.
  ChunkData* const beg_cp = sd.chunk(beg_chunk);
  ChunkData* const end_cp = sd.chunk(end_chunk);
  for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
    cp->set_completed();
  }
}
2972 |
||
2973 |
// Return the SpaceId for the space containing addr. If addr is not in the |
|
2974 |
// heap, last_space_id is returned. In debug mode it expects the address to be |
|
2975 |
// in the heap and asserts such. |
|
2976 |
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) { |
|
2977 |
assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap"); |
|
2978 |
||
2979 |
for (unsigned int id = perm_space_id; id < last_space_id; ++id) { |
|
2980 |
if (_space_info[id].space()->contains(addr)) { |
|
2981 |
return SpaceId(id); |
|
2982 |
} |
|
2983 |
} |
|
2984 |
||
2985 |
assert(false, "no space contains the addr"); |
|
2986 |
return last_space_id; |
|
2987 |
} |
|
void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
                                                SpaceId id) {
  assert(id < last_space_id, "bad space id");

  ParallelCompactData& sd = summary_data();
  const SpaceInfo* const space_info = _space_info + id;
  ObjectStartArray* const start_array = space_info->start_array();

  const MutableSpace* const space = space_info->space();
  assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
  HeapWord* const beg_addr = space_info->dense_prefix();
  HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());

  const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
  const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
  const ChunkData* cur_chunk;
  for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
    HeapWord* const addr = cur_chunk->deferred_obj_addr();
    if (addr != NULL) {
      if (start_array != NULL) {
        start_array->allocate_block(addr);
      }
      oop(addr)->update_contents(cm);
      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
    }
  }
}
|
// Skip over count live words starting from beg, and return the address of the
// next live word.  Unless marked, the word corresponding to beg is assumed to
// be dead.  Callers must either ensure beg does not correspond to the middle of
// an object, or account for those live words in some other way.  Callers must
// also ensure that there are enough live words in the range [beg, end) to skip.
HeapWord*
PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
{
  assert(count > 0, "sanity");

  ParMarkBitMap* m = mark_bitmap();
  idx_t bits_to_skip = m->words_to_bits(count);
  idx_t cur_beg = m->addr_to_bit(beg);
  const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));

  do {
    cur_beg = m->find_obj_beg(cur_beg, search_end);
    idx_t cur_end = m->find_obj_end(cur_beg, search_end);
    const size_t obj_bits = cur_end - cur_beg + 1;
    if (obj_bits > bits_to_skip) {
      return m->bit_to_addr(cur_beg + bits_to_skip);
    }
    bits_to_skip -= obj_bits;
    cur_beg = cur_end + 1;
  } while (bits_to_skip > 0);

  // Skipping the desired number of words landed just past the end of an object.
  // Find the start of the next object.
  cur_beg = m->find_obj_beg(cur_beg, search_end);
  assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
  return m->bit_to_addr(cur_beg);
}
|
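// Return the address in source chunk src_chunk_idx of the first live word
// that will be copied to dest_addr, accounting for a partial object that
// extends onto the chunk and for live words that precede dest_addr's data.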
HeapWord*
PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                  size_t src_chunk_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  const ParallelCompactData& sd = summary_data();
  const size_t ChunkSize = ParallelCompactData::ChunkSize;

  assert(sd.is_chunk_aligned(dest_addr), "not aligned");

  const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
  const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
  HeapWord* const src_chunk_destination = src_chunk_ptr->destination();

  assert(dest_addr >= src_chunk_destination, "wrong src chunk");
  assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");

  HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
  HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;

  HeapWord* addr = src_chunk_beg;
  if (dest_addr == src_chunk_destination) {
    // Return the first live word in the source chunk.
    if (partial_obj_size == 0) {
      addr = bitmap->find_obj_beg(addr, src_chunk_end);
      assert(addr < src_chunk_end, "no objects start in src chunk");
    }
    return addr;
  }

  // Must skip some live data.
  size_t words_to_skip = dest_addr - src_chunk_destination;
  assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");

  if (partial_obj_size >= words_to_skip) {
    // All the live words to skip are part of the partial object.
    addr += words_to_skip;
    if (partial_obj_size == words_to_skip) {
      // Find the first live word past the partial object.
      addr = bitmap->find_obj_beg(addr, src_chunk_end);
      assert(addr < src_chunk_end, "wrong src chunk");
    }
    return addr;
  }

  // Skip over the partial object (if any).
  if (partial_obj_size != 0) {
    words_to_skip -= partial_obj_size;
    addr += partial_obj_size;
  }

  // Skip over live words due to objects that start in the chunk.
  addr = skip_live_words(addr, src_chunk_end, words_to_skip);
  assert(addr < src_chunk_end, "wrong src chunk");
  return addr;
}
|
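// The data in chunks [beg_chunk, chunk(end_addr)) has been copied to its
// destination; decrement the destination count of each chunk, and claim and
// enqueue any chunk that becomes available for filling as a result.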
void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
                                                     size_t beg_chunk,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();
  ChunkData* const beg = sd.chunk(beg_chunk);
  HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
  ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
  size_t cur_idx = beg_chunk;
  for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
    assert(cur->data_size() > 0, "chunk must have live data");
    cur->decrement_destination_count();
    if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
      cm->save_for_processing(cur_idx);
    }
  }
}
|
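// Find the next chunk to use as a source of live data: skip empty chunks in
// the current space, and if none remain below top, advance to the next space
// that does not compact into itself and locate its first chunk with live data.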
size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
                                         SpaceId& src_space_id,
                                         HeapWord*& src_space_top,
                                         HeapWord* end_addr)
{
  typedef ParallelCompactData::ChunkData ChunkData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t chunk_size = ParallelCompactData::ChunkSize;

  size_t src_chunk_idx = 0;

  // Skip empty chunks (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
  ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
  const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
  while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
    ++src_chunk_ptr;
  }

  if (src_chunk_ptr < top_chunk_ptr) {
    // The next source chunk is in the current space.  Update src_chunk_idx and
    // the source address to match src_chunk_ptr.
    src_chunk_idx = sd.chunk(src_chunk_ptr);
    HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
    if (src_chunk_addr > closure.source()) {
      closure.set_source(src_chunk_addr);
    }
    return src_chunk_idx;
  }

  // Switch to a new source space and find the first non-empty chunk.
  unsigned int space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  HeapWord* const destination = closure.destination();

  do {
    MutableSpace* space = _space_info[space_id].space();
    HeapWord* const bottom = space->bottom();
    const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);

    // Iterate over the spaces that do not compact into themselves.
    if (bottom_cp->destination() != bottom) {
      HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
      const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);

      for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
        if (src_cp->live_obj_size() > 0) {
          // Found it.
          assert(src_cp->destination() == destination,
                 "first live obj in the space must match the destination");
          assert(src_cp->partial_obj_size() == 0,
                 "a space cannot begin with a partial obj");

          src_space_id = SpaceId(space_id);
          src_space_top = space->top();
          const size_t src_chunk_idx = sd.chunk(src_cp);
          closure.set_source(sd.chunk_to_addr(src_chunk_idx));
          return src_chunk_idx;
        } else {
          assert(src_cp->data_size() == 0, "sanity");
        }
      }
    }
  } while (++space_id < last_space_id);

  assert(false, "no source chunk was found");
  return 0;
}
|
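// Fill destination chunk chunk_idx by copying live words from its source
// chunks, updating interior oops as objects are moved.  An object that does
// not fit entirely has its interior pointer updates deferred.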
void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
{
  typedef ParMarkBitMap::IterationStatus IterationStatus;
  const size_t ChunkSize = ParallelCompactData::ChunkSize;
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  ChunkData* const chunk_ptr = sd.chunk(chunk_idx);

  // Get the items needed to construct the closure.
  HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
  SpaceId dest_space_id = space_id(dest_addr);
  ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
  HeapWord* new_top = _space_info[dest_space_id].new_top();
  assert(dest_addr < new_top, "sanity");
  const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);

  // Get the source chunk and related info.
  size_t src_chunk_idx = chunk_ptr->source_chunk();
  SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();

  MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
  closure.set_source(first_src_addr(dest_addr, src_chunk_idx));

  // Adjust src_chunk_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a chunk is copied to itself).
  if (src_chunk_idx == chunk_idx) {
    src_chunk_idx += 1;
  }

  if (bitmap->is_unmarked(closure.source())) {
    // The first source word is in the middle of an object; copy the remainder
    // of the object or as much as will fit.  The fact that pointer updates were
    // deferred will be noted when the object header is processed.
    HeapWord* const old_src_addr = closure.source();
    closure.copy_partial_obj();
    if (closure.is_full()) {
      decrement_destination_counts(cm, src_chunk_idx, closure.source());
      chunk_ptr->set_deferred_obj_addr(NULL);
      chunk_ptr->set_completed();
      return;
    }

    HeapWord* const end_addr = sd.chunk_align_down(closure.source());
    if (sd.chunk_align_down(old_src_addr) != end_addr) {
      // The partial object was copied from more than one source chunk.
      decrement_destination_counts(cm, src_chunk_idx, end_addr);

      // Move to the next source chunk, possibly switching spaces as well.  All
      // args except end_addr may be modified.
      src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
                                     end_addr);
    }
  }

  do {
    HeapWord* const cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
                                    src_space_top);
    IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);

    if (status == ParMarkBitMap::incomplete) {
      // The last obj that starts in the source chunk does not end in the chunk.
      assert(closure.source() < end_addr, "sanity");
      HeapWord* const obj_beg = closure.source();
      HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
                                       src_space_top);
      HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
      if (obj_end < range_end) {
        // The end was found; the entire object will fit.
        status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
        assert(status != ParMarkBitMap::would_overflow, "sanity");
      } else {
        // The end was not found; the object will not fit.
        assert(range_end < src_space_top, "obj cannot cross space boundary");
        status = ParMarkBitMap::would_overflow;
      }
    }

    if (status == ParMarkBitMap::would_overflow) {
      // The last object did not fit.  Note that interior oop updates were
      // deferred, then copy enough of the object to fill the chunk.
      chunk_ptr->set_deferred_obj_addr(closure.destination());
      status = closure.copy_until_full();   // copies from closure.source()

      decrement_destination_counts(cm, src_chunk_idx, closure.source());
      chunk_ptr->set_completed();
      return;
    }

    if (status == ParMarkBitMap::full) {
      decrement_destination_counts(cm, src_chunk_idx, closure.source());
      chunk_ptr->set_deferred_obj_addr(NULL);
      chunk_ptr->set_completed();
      return;
    }

    decrement_destination_counts(cm, src_chunk_idx, end_addr);

    // Move to the next source chunk, possibly switching spaces as well.  All
    // args except end_addr may be modified.
    src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
                                   end_addr);
  } while (true);
}
|
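// Copy live data in the given space to its new location and update interior
// oops.  Chunks in the dense prefix are updated in place; everything beyond
// the dense prefix is both moved and updated.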
void
PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
  const MutableSpace* sp = space(space_id);
  if (sp->is_empty()) {
    return;
  }

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  HeapWord* const dp_addr = dense_prefix(space_id);
  HeapWord* beg_addr = sp->bottom();
  HeapWord* end_addr = sp->top();

#ifdef ASSERT
  assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
  if (cm->should_verify_only()) {
    VerifyUpdateClosure verify_update(cm, sp);
    bitmap->iterate(&verify_update, beg_addr, end_addr);
    return;
  }

  if (cm->should_reset_only()) {
    ResetObjectsClosure reset_objects(cm);
    bitmap->iterate(&reset_objects, beg_addr, end_addr);
    return;
  }
#endif

  const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
  const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
  if (beg_chunk < dp_chunk) {
    update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
  }

  // The destination of the first live object that starts in the chunk is one
  // past the end of the partial object entering the chunk (if any).
  HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
  HeapWord* const new_top = _space_info[space_id].new_top();
  assert(new_top >= dest_addr, "bad new_top value");
  const size_t words = pointer_delta(new_top, dest_addr);

  if (words > 0) {
    ObjectStartArray* start_array = _space_info[space_id].start_array();
    MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);

    ParMarkBitMap::IterationStatus status;
    status = bitmap->iterate(&closure, dest_addr, end_addr);
    assert(status == ParMarkBitMap::full, "iteration not complete");
    assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
           "live objects skipped because closure is full");
  }
}
|
jlong PSParallelCompact::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSParallelCompact::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}
|
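// Copy words from source() until the destination is full.  Used when the last
// object in a source chunk does not fit in the destination chunk; the words
// copied here top off the destination and the rest of the object follows later.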
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
  if (source() != destination()) {
    assert(source() > destination(), "must copy to the left");
    Copy::aligned_conjoint_words(source(), destination(), words_remaining());
  }
  update_state(words_remaining());
  assert(is_full(), "sanity");
  return ParMarkBitMap::full;
}
|
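// Copy the tail of an object that extends onto the current source chunk from
// a previous one: copy up to the end of the object, or as many words as fit
// in the remaining destination space.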
void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
    assert(source() > destination(), "must copy to the left");
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
}
|
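// Move the object at addr to the current destination and update its interior
// oops.  Returns would_overflow, without copying, if the object does not fit
// in the space remaining; otherwise full or incomplete as appropriate.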
ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != NULL, "sanity");
  assert(bitmap()->obj_size(addr) == words, "bad size");

  _source = addr;
  assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
         destination(), "wrong destination");

  if (words > words_remaining()) {
    return ParMarkBitMap::would_overflow;
  }

  // The start_array must be updated even if the object is not moving.
  if (_start_array != NULL) {
    _start_array->allocate_block(destination());
  }

  if (destination() != source()) {
    assert(destination() < source(), "must copy to the left");
    Copy::aligned_conjoint_words(source(), destination(), words);
  }

  oop moved_oop = (oop) destination();
  moved_oop->update_contents(compaction_manager());
  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");

  update_state(words);
  assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
  return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
}
|
UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                     ParCompactionManager* cm,
                                     PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(mbm, cm),
  _space_id(space_id),
  _start_array(PSParallelCompact::start_array(space_id))
{
}

// Updates the references in the object to their new values.
ParMarkBitMapClosure::IterationStatus
UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
  do_addr(addr);
  return ParMarkBitMap::incomplete;
}
|
BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
                                             ParCompactionManager* cm,
                                             size_t chunk_index) :
  ParMarkBitMapClosure(mbm, cm),
  _live_data_left(0),
  _cur_block(0) {
  _chunk_start =
    PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
  _chunk_end =
    PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
      ParallelCompactData::ChunkSize;
  _chunk_index = chunk_index;
  _cur_block =
    PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
}

bool BitBlockUpdateClosure::chunk_contains_cur_block() {
  return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
}
|
void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
  DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  _chunk_index = chunk_index;
  _live_data_left = 0;
  _chunk_start = sd.chunk_to_addr(chunk_index);
  _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;

  // The first block in this chunk
  size_t first_block = sd.addr_to_block_idx(_chunk_start);
  size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();

  // Set the offset to 0.  By definition it should have that value
  // but it may have been written while processing an earlier chunk.
  if (partial_live_size == 0) {
    // No live object extends onto the chunk.  The first bit
    // in the bit map for the first chunk must be a start bit.
    // Although there may not be any marked bits, it is safe
    // to set it as a start bit.
    sd.block(first_block)->set_start_bit_offset(0);
    sd.block(first_block)->set_first_is_start_bit(true);
  } else if (sd.partial_obj_ends_in_block(first_block)) {
    sd.block(first_block)->set_end_bit_offset(0);
    sd.block(first_block)->set_first_is_start_bit(false);
  } else {
    // The partial object extends beyond the first block.
    // There is no object starting in the first block
    // so the offset and bit parity are not needed.
    // Set the bit parity to start bit so assertions
    // work when no bit is found.
    sd.block(first_block)->set_end_bit_offset(0);
    sd.block(first_block)->set_first_is_start_bit(false);
  }
  _cur_block = first_block;
#ifdef ASSERT
  if (sd.block(first_block)->first_is_start_bit()) {
    assert(!sd.partial_obj_ends_in_block(first_block),
           "Partial object cannot end in first block");
  }

  if (PrintGCDetails && Verbose) {
    if (partial_live_size == 1) {
      gclog_or_tty->print_cr("first_block " PTR_FORMAT
                             " _offset " PTR_FORMAT
                             " _first_is_start_bit %d",
                             first_block,
                             sd.block(first_block)->raw_offset(),
                             sd.block(first_block)->first_is_start_bit());
    }
  }
#endif
  DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);)
}
|
// This method is called when an object has been found (both beginning
// and end of the object) in the range of iteration.  It calculates
// the words of live data to the left of a block.  That live
// data includes any object starting to the left of the block (i.e.,
// the live-data-to-the-left of block AAA will include the full size
// of any object entering AAA).

ParMarkBitMapClosure::IterationStatus
BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  // Add the size to the block data.
  HeapWord* obj = addr;
  ParallelCompactData& sd = PSParallelCompact::summary_data();

  assert(bitmap()->obj_size(obj) == words, "bad size");
  assert(_chunk_start <= obj, "object is not in chunk");
  assert(obj + words <= _chunk_end, "object is not in chunk");

  // Update the live data to the left
  size_t prev_live_data_left = _live_data_left;
  _live_data_left = _live_data_left + words;

  // Is this object in the current block?
  size_t block_of_obj = sd.addr_to_block_idx(obj);
  size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1);
  HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last);
  if (_cur_block < block_of_obj) {

    //
    // No object crossed the block boundary and this object was found
    // on the other side of the block boundary.  Update the offset for
    // the new block with the data size that does not include this object.
    //
    // The first bit in block_of_obj is a start bit except in the
    // case where the partial object for the chunk extends into
    // this block.
    if (sd.partial_obj_ends_in_block(block_of_obj)) {
      sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
    } else {
      sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
    }

    // Does this object pass beyond its block?
    if (block_of_obj < block_of_obj_last) {
      // Object crosses block boundary.  Two blocks need to be updated:
      //        the current block where the object started
      //        the block where the object ends
      //
      // The offset for blocks with no objects starting in them
      // (e.g., blocks between _cur_block and block_of_obj_last)
      // should not be needed.
      // Note that block_of_obj_last may be in another chunk.  If so,
      // it should be overwritten later.  This is a problem (writing
      // into a block in a later chunk) for parallel execution.
      assert(obj < block_of_obj_last_addr,
             "Object should start in previous block");

      // obj is crossing into block_of_obj_last so the first bit
      // is an end bit.
      sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);

      _cur_block = block_of_obj_last;
    } else {
      // _first_is_start_bit has already been set correctly
      // in the if-then-else above so don't reset it here.
      _cur_block = block_of_obj;
    }
  } else {
    // The current block only changes if the object extends beyond
    // the block it starts in.
    //
    // The object starts in the current block.
    // Does this object pass beyond the end of it?
    if (block_of_obj < block_of_obj_last) {
      // Object crosses block boundary.
      // See note above on possible blocks between block_of_obj and
      // block_of_obj_last
      assert(obj < block_of_obj_last_addr,
             "Object should start in previous block");

      sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);

      _cur_block = block_of_obj_last;
    }
  }

  // Return incomplete if there are more blocks to be done.
  if (chunk_contains_cur_block()) {
    return ParMarkBitMap::incomplete;
  }
  return ParMarkBitMap::complete;
}
|
// Verify the new location using the forwarding pointer
// from MarkSweep::mark_sweep_phase2().  Set the mark_word
// to the initial value.
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  // The second arg (words) is not used.
  oop obj = (oop) addr;
  HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
  HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
  if (forwarding_ptr == NULL) {
    // The object is dead or not moving.
    assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
           "Object liveness is wrong.");
    return ParMarkBitMap::incomplete;
  }
  assert(UseParallelOldGCDensePrefix ||
         (HeapMaximumCompactionInterval > 1) ||
         (MarkSweepAlwaysCompactCount > 1) ||
         (forwarding_ptr == new_pointer),
         "Calculation of new location is incorrect");
  return ParMarkBitMap::incomplete;
}
|
// Reset objects modified for debug checking.
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
  // The second arg (words) is not used.
  oop obj = (oop) addr;
  obj->init_mark();
  return ParMarkBitMap::incomplete;
}
|
// Prepare for compaction.  This method is executed once
// (i.e., by a single thread) before compaction.
// Save the updated location of the intArrayKlassObj for
// filling holes in the dense prefix.
void PSParallelCompact::compact_prologue() {
  _updated_int_array_klass_obj = (klassOop)
    summary_data().calc_new_pointer(Universe::intArrayKlassObj());
}
|
// The initial implementation of this method created a field
// _next_compaction_space_id in SpaceInfo and initialized
// that field in SpaceInfo::initialize_space_info().  That
// required that _next_compaction_space_id be declared a
// SpaceId in SpaceInfo and that would have required that
// either SpaceId be declared in a separate class or that
// it be declared in SpaceInfo.  It didn't seem consistent
// to declare it in SpaceInfo (didn't really fit logically).
// Alternatively, defining a separate class to define SpaceId
// seemed excessive.  This implementation is simple and localizes
// the knowledge.

PSParallelCompact::SpaceId
PSParallelCompact::next_compaction_space_id(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  switch (id) {
    case perm_space_id :
      return last_space_id;
    case old_space_id :
      return eden_space_id;
    case eden_space_id :
      return from_space_id;
    case from_space_id :
      return to_space_id;
    case to_space_id :
      return last_space_id;
    default:
      assert(false, "Bad space id");
      return last_space_id;
  }
}
|
// Here temporarily for debugging
#ifdef ASSERT
size_t ParallelCompactData::block_idx(BlockData* block) {
  size_t index = pointer_delta(block,
    PSParallelCompact::summary_data()._block_data, sizeof(BlockData));
  return index;
}
#endif