author:      coleenp
date:        Sun, 13 Apr 2008 17:43:42 -0400
changeset:   360:21d113ecbf6a
parent:      185:cda2a1eb4be5
children:    613:2aa2b913106c, 1374:4c24294029a9
permissions: -rw-r--r--

/*
 * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_compactibleFreeListSpace.cpp.incl"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, true);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  splitBirth(mr.word_size());
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.
  if (ParallelGCThreads > 0) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual call above saved
      // the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but never for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists: all lists start out empty.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

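// Reset each indexed free list to the empty state, leaving its size field
// intact. (Note: this assumes FreeList::reset(hint) clears the count, head
// and tail, and sets the hint; the asserts below check exactly that.)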
void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

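// Reset the space so that all of its free space is described by "mr":
// empty the indexed free lists and the dictionary, then hand "mr" back
// as a single free chunk (to the dictionary if large enough, otherwise
// to the matching indexed free list).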
void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists.  Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

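// Answer whether the block at "p" currently carries the free chunk
// encoding. Note: absent the free list lock this is only a snapshot,
// since the block's state may be changing concurrently.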
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and, further,
  // the computation of totalSizeInIndexedFreeLists() is itself
  // a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value.  However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return the res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    // Use SIZE_FORMAT rather than %ld for the size_t argument.
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  // Use SIZE_FORMAT rather than %d for the size_t arguments.
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

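// Scan the indexed lists from the largest size downward; the first
// non-empty list gives the largest chunk size held outside the dictionary.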
size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

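// Change the end of the space. On growth, the newly exposed words
// [prevEnd, value) are turned into free space; on shrinkage, the new
// end must not cut into the unallocated block (checked below).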
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(value >= unallocated_block(), "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC { |
|
486 |
CompactibleFreeListSpace* _cfls; |
|
487 |
CMSCollector* _collector; |
|
488 |
protected: |
|
489 |
// Override. |
|
490 |
#define walk_mem_region_with_cl_DECL(ClosureType) \ |
|
491 |
virtual void walk_mem_region_with_cl(MemRegion mr, \ |
|
492 |
HeapWord* bottom, HeapWord* top, \ |
|
493 |
ClosureType* cl); \ |
|
494 |
void walk_mem_region_with_cl_par(MemRegion mr, \ |
|
495 |
HeapWord* bottom, HeapWord* top, \ |
|
496 |
ClosureType* cl); \ |
|
497 |
void walk_mem_region_with_cl_nopar(MemRegion mr, \ |
|
498 |
HeapWord* bottom, HeapWord* top, \ |
|
499 |
ClosureType* cl) |
|
500 |
walk_mem_region_with_cl_DECL(OopClosure); |
|
501 |
walk_mem_region_with_cl_DECL(FilteringClosure); |
|
502 |
||
503 |
public: |
|
504 |
FreeListSpace_DCTOC(CompactibleFreeListSpace* sp, |
|
505 |
CMSCollector* collector, |
|
506 |
OopClosure* cl, |
|
507 |
CardTableModRefBS::PrecisionStyle precision, |
|
508 |
HeapWord* boundary) : |
|
509 |
Filtering_DCTOC(sp, cl, precision, boundary), |
|
510 |
_cfls(sp), _collector(collector) {} |
|
511 |
}; |
|
512 |
||
513 |
// We de-virtualize the block-related calls below, since we know that our |
|
514 |
// space is a CompactibleFreeListSpace. |
|
515 |
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ |
|
516 |
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \ |
|
517 |
HeapWord* bottom, \ |
|
518 |
HeapWord* top, \ |
|
519 |
ClosureType* cl) { \ |
|
520 |
if (SharedHeap::heap()->n_par_threads() > 0) { \ |
|
521 |
walk_mem_region_with_cl_par(mr, bottom, top, cl); \ |
|
522 |
} else { \ |
|
523 |
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \ |
|
524 |
} \ |
|
525 |
} \ |
|
526 |
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \ |
|
527 |
HeapWord* bottom, \ |
|
528 |
HeapWord* top, \ |
|
529 |
ClosureType* cl) { \ |
|
530 |
/* Skip parts that are before "mr", in case "block_start" sent us \ |
|
531 |
back too far. */ \ |
|
532 |
HeapWord* mr_start = mr.start(); \ |
|
533 |
size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
|
534 |
HeapWord* next = bottom + bot_size; \ |
|
535 |
while (next < mr_start) { \ |
|
536 |
bottom = next; \ |
|
537 |
bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
|
538 |
next = bottom + bot_size; \ |
|
539 |
} \ |
|
540 |
\ |
|
541 |
while (bottom < top) { \ |
|
542 |
if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \ |
|
543 |
!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ |
|
544 |
oop(bottom)) && \ |
|
545 |
!_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ |
|
546 |
size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ |
|
547 |
bottom += _cfls->adjustObjectSize(word_sz); \ |
|
548 |
} else { \ |
|
549 |
bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
|
550 |
} \ |
|
551 |
} \ |
|
552 |
} \ |
|
553 |
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \ |
|
554 |
HeapWord* bottom, \ |
|
555 |
HeapWord* top, \ |
|
556 |
ClosureType* cl) { \ |
|
557 |
/* Skip parts that are before "mr", in case "block_start" sent us \ |
|
558 |
back too far. */ \ |
|
559 |
HeapWord* mr_start = mr.start(); \ |
|
560 |
size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
|
561 |
HeapWord* next = bottom + bot_size; \ |
|
562 |
while (next < mr_start) { \ |
|
563 |
bottom = next; \ |
|
564 |
bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
|
565 |
next = bottom + bot_size; \ |
|
566 |
} \ |
|
567 |
\ |
|
568 |
while (bottom < top) { \ |
|
569 |
if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \ |
|
570 |
!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ |
|
571 |
oop(bottom)) && \ |
|
572 |
!_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ |
|
573 |
size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ |
|
574 |
bottom += _cfls->adjustObjectSize(word_sz); \ |
|
575 |
} else { \ |
|
576 |
bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
|
577 |
} \ |
|
578 |
} \ |
|
579 |
} |
|
580 |
||
581 |
// (There are only two of these, rather than N, because the split is due |
|
582 |
// only to the introduction of the FilteringClosure, a local part of the |
|
583 |
// impl of this abstraction.) |
|
584 |
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure) |
|
585 |
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
|
586 |
||
587 |
DirtyCardToOopClosure* |
|
588 |
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl, |
|
589 |
CardTableModRefBS::PrecisionStyle precision, |
|
590 |
HeapWord* boundary) { |
|
591 |
return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary); |
|
592 |
} |
|
593 |
||
594 |
||
595 |
// Note on locking for the space iteration functions: |
|
596 |
// since the collector's iteration activities are concurrent with |
|
597 |
// allocation activities by mutators, absent a suitable mutual exclusion |
|
598 |
// mechanism the iterators may go awry. For instace a block being iterated |
|
599 |
// may suddenly be allocated or divided up and part of it allocated and |
|
600 |
// so on. |
|
601 |
||
602 |
// Apply the given closure to each block in the space. |
|
603 |
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) { |
|
604 |
assert_lock_strong(freelistLock()); |
|
605 |
HeapWord *cur, *limit; |
|
606 |
for (cur = bottom(), limit = end(); cur < limit; |
|
607 |
cur += cl->do_blk_careful(cur)); |
|
608 |
} |
|
609 |
||
610 |
// Apply the given closure to each block in the space. |
|
611 |
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) { |
|
612 |
assert_lock_strong(freelistLock()); |
|
613 |
HeapWord *cur, *limit; |
|
614 |
for (cur = bottom(), limit = end(); cur < limit; |
|
615 |
cur += cl->do_blk(cur)); |
|
616 |
} |
|
617 |
||
// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the intersection of the
// space and the given memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

||
679 |
// NOTE: In the following methods, in order to safely be able to |
|
680 |
// apply the closure to an object, we need to be sure that the |
|
681 |
// object has been initialized. We are guaranteed that an object |
|
682 |
// is initialized if we are holding the Heap_lock with the |
|
683 |
// world stopped. |
|
684 |
void CompactibleFreeListSpace::verify_objects_initialized() const { |
|
685 |
if (is_init_completed()) { |
|
686 |
assert_locked_or_safepoint(Heap_lock); |
|
687 |
if (Universe::is_fully_initialized()) { |
|
688 |
guarantee(SafepointSynchronize::is_at_safepoint(), |
|
689 |
"Required for objects to be initialized"); |
|
690 |
} |
|
691 |
} // else make a concession at vm start-up |
|
692 |
} |
|
693 |
||
694 |
// Apply the given closure to each object in the space |
|
695 |
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { |
|
696 |
assert_lock_strong(freelistLock()); |
|
697 |
NOT_PRODUCT(verify_objects_initialized()); |
|
698 |
HeapWord *cur, *limit; |
|
699 |
size_t curSize; |
|
700 |
for (cur = bottom(), limit = end(); cur < limit; |
|
701 |
cur += curSize) { |
|
702 |
curSize = block_size(cur); |
|
703 |
if (block_is_obj(cur)) { |
|
704 |
blk->do_object(oop(cur)); |
|
705 |
} |
|
706 |
} |
|
707 |
} |
|
708 |
||
709 |
void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
|
710 |
UpwardsObjectClosure* cl) { |
|
711 |
assert_locked(); |
|
712 |
NOT_PRODUCT(verify_objects_initialized()); |
|
713 |
Space::object_iterate_mem(mr, cl); |
|
714 |
} |
|
715 |
||
// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary.  [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *addr, *last;
  size_t size;
  for (addr = bottom(), last = end();
       addr < last; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful(oop(addr));
      if (size == 0) {
        // An unparsable object was found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary.  [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
                                                   ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
    "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object was found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

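// Return the size of the block at "p" without holding the free list lock.
// For a free chunk, the klass word is read, then the size, then the klass
// word again; only if the two klass reads agree is the size trusted, since
// the chunk may be concurrently allocated.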
size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  oop o = (oop)p;
  volatile oop* second_word_addr = o->klass_addr();
  while (true) {
    klassOop k = (klassOop)(*second_word_addr);
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
      FreeChunk* fc = (FreeChunk*)p;
      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
      size_t res = (*sz_addr);
      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
      if (k == k2) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else if (k != NULL) {
      assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
      assert(o->is_parsable(), "Should be parsable");
      assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
      size_t res = o->size_given_klass(k->klass_part());
      res = adjustObjectSize(res);
      assert(res != 0, "Block size should not be 0");
      return res;
    }
  }
}

||
834 |
// A variant of the above that uses the Printezis bits for |
|
835 |
// unparsable but allocated objects. This avoids any possible |
|
836 |
// stalls waiting for mutators to initialize objects, and is |
|
837 |
// thus potentially faster than the variant above. However, |
|
838 |
// this variant may return a zero size for a block that is |
|
839 |
// under mutation and for which a consistent size cannot be |
|
840 |
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). |
|
841 |
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, |
|
842 |
const CMSCollector* c) |
|
843 |
const { |
|
844 |
assert(MemRegion(bottom(), end()).contains(p), "p not in space"); |
|
845 |
// This must be volatile, or else there is a danger that the compiler |
|
846 |
// will compile the code below into a sometimes-infinite loop, by keeping |
|
847 |
// the value read the first time in a register. |
|
848 |
oop o = (oop)p; |
|
849 |
volatile oop* second_word_addr = o->klass_addr(); |
|
850 |
DEBUG_ONLY(uint loops = 0;) |
|
851 |
while (true) { |
|
852 |
klassOop k = (klassOop)(*second_word_addr); |
|
853 |
// We must do this until we get a consistent view of the object. |
|
854 |
if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) { |
|
855 |
FreeChunk* fc = (FreeChunk*)p; |
|
856 |
volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr()); |
|
857 |
size_t res = (*sz_addr); |
|
858 |
klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm. |
|
859 |
if (k == k2) { |
|
860 |
assert(res != 0, "Block size should not be 0"); |
|
861 |
assert(loops == 0, "Should be 0"); |
|
862 |
return res; |
|
863 |
} |
|
864 |
} else if (k != NULL && o->is_parsable()) { |
|
865 |
assert(k->is_oop(), "Should really be klass oop."); |
|
866 |
assert(o->is_oop(), "Should be an oop"); |
|
867 |
size_t res = o->size_given_klass(k->klass_part()); |
|
868 |
res = adjustObjectSize(res); |
|
869 |
assert(res != 0, "Block size should not be 0"); |
|
870 |
return res; |
|
871 |
} else { |
|
872 |
return c->block_size_if_printezis_bits(p); |
|
873 |
} |
|
874 |
assert(loops == 0, "Can loop at most once"); |
|
875 |
DEBUG_ONLY(loops++;) |
|
876 |
} |
|
877 |
} |
|
878 |
||
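// Like block_size(), but for use when parallel mutation of the block's
// state is not a concern: the free/allocated state is read once, with
// no confirming re-read of the klass word.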
size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  FreeChunk* fc = (FreeChunk*)p;
  if (fc->isFree()) {
    return fc->size();
  } else {
    // Ignore mark word because this may be a recently promoted
    // object whose mark word is used to chain together grey
    // objects (the last one would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return adjustObjectSize(oop(p)->size());
  }
}

// This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion.)
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  // When doing a mark-sweep-compact of the CMS generation, this
  // assertion may fail because prepare_for_compaction() uses
  // space that is garbage to maintain information on ranges of
  // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects at
  // addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  klassOop k = oop(p)->klass();
  intptr_t ki = (intptr_t)k;
  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
  if (k != NULL) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  } else {
    return false;  // Was not an object at the start of collection.
  }
}

// Check if the object is alive. This fact is checked either by consulting
// the main marking bitmap in the sweeping phase or, if it's a permanent
// generation and we're not in the sweeping phase, by checking the
// perm_gen_verify_bit_map where we store the "deadness" information if
// we did not sweep the perm gen in the most recent previous GC cycle.
bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");

  // If we're sweeping, we use object liveness information from the main bit map
  // for both perm gen and old gen.
  // We don't need to lock the bitmap (live_map or dead_map below), because
  // EITHER we are in the middle of the sweeping phase, and the
  // main marking bit map (live_map below) is locked,
  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  // is stable, because it's mutated only in the sweeping phase.
  if (_collector->abstract_state() == CMSCollector::Sweeping) {
    CMSBitMap* live_map = _collector->markBitMap();
    return live_map->isMarked((HeapWord*) p);
  } else {
    // If we're not currently sweeping and we haven't swept the perm gen in
    // the previous concurrent cycle then we may have dead but unswept objects
    // in the perm gen. In this case, we use the "deadness" information
    // that we had saved in perm_gen_verify_bit_map at the last sweep.
    if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
      if (_collector->verifying()) {
        CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
        // Object is marked in the dead_map bitmap at the previous sweep
        // when we know that it's dead; if the bitmap is not allocated then
        // the object is alive.
        return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
               || !dead_map->par_isMarked((HeapWord*) p);
      } else {
        return false; // We can't say for sure if it's live, so we say that it's dead.
      }
    }
  }
  return true;
}

bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  assert(_bt.block_start(p) == p, "Should be a block boundary");
  if (!fc->isFree()) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  }
  return false;
}

// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock when you call this.
size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  size_t size = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      // We may be calling here without the lock in which case we
      // won't do this modest sanity check.
      if (freelistLock()->owned_by_self()) {
        size_t total_list_size = 0;
        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
             fc = fc->next()) {
          total_list_size += i;
        }
        assert(total_list_size == i * _indexedFreeList[i].count(),
          "Count in list is incorrect");
      }
    )
    size += i * _indexedFreeList[i].count();
  }
  return size;
}

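// MT-safe allocation entry point: take the free list lock (with no
// safepoint check) and delegate to the single-threaded allocate().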
HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}

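// Try to satisfy the request from what currently remains in the
// small linear allocation block.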
HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}

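// Allocate "size" heap words; "size" must already have been adjusted
// with adjustObjectSize(). The caller must hold the free list lock;
// returns NULL if the request cannot be satisfied.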
HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  if (_adaptive_freelists) {
    res = allocate_adaptive_freelists(size);
  } else {  // non-adaptive free lists
    res = allocate_non_adaptive_freelists(size);
  }

  if (res != NULL) {
    // check that res does lie in this space!
    assert(is_in_reserved(res), "Not in this space!");
    assert(is_aligned((void*)res), "alignment check");

    FreeChunk* fc = (FreeChunk*)res;
    fc->markNotFree();
    assert(!fc->isFree(), "shouldn't be marked free");
    assert(oop(fc)->klass() == NULL, "should look uninitialized");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block(res, size);
    _bt.verify_not_unallocated(res, size);
    // mangle a just allocated object with a distinct pattern.
    debug_only(fc->mangleAllocated(size));
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  HeapWord* res = NULL;
  // try and use linear allocation for smaller blocks
  if (size < _smallLinearAllocBlock._allocation_size_limit) {
    // if successful, the following also adjusts block offset table
    res = getChunkFromSmallLinearAllocBlock(size);
  }
  // Else triage: the indexed lists for smaller sizes,
  // the dictionary for larger ones.
  if (res == NULL) {
    if (size < SmallForDictionary) {
      res = (HeapWord*) getChunkFromIndexedFreeList(size);
    } else {
      // else get it from the big dictionary; if even this doesn't
      // work we are out of luck.
      res = (HeapWord*)getChunkFromDictionaryExact(size);
    }
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  // Strategy
  //   if small
  //     exact size from small object indexed list if small
  //     small or large linear allocation block (linAB) as appropriate
  //     take from lists of greater sized chunks
  //   else
  //     dictionary
  //     small or large linear allocation block if it has the space
  // Try allocating exact size from indexTable first
  if (size < IndexSetSize) {
    res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
      assert(res != (HeapWord*)_indexedFreeList[size].head(),
        "Not removed from free list");
      // no block offset table adjustment is necessary on blocks in
      // the indexed lists.

    // Try allocating from the small LinAB
    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
        (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
      // if successful, the above also adjusts block offset table
      // Note that this call will refill the LinAB to
      // satisfy the request.  This is different from evm.
      // Don't record chunk off a LinAB?  smallSplitBirth(size);

    } else {
      // Raid the exact free lists larger than size, even if they are not
      // overpopulated.
      res = (HeapWord*) getChunkFromGreater(size);
    }
  } else {
    // Big objects get allocated directly from the dictionary.
    res = (HeapWord*) getChunkFromDictionaryExact(size);
    if (res == NULL) {
      // Try hard not to fail since an allocation failure will likely
      // trigger a synchronous GC.  Try to get the space from the
      // allocation blocks.
      res = getChunkFromSmallLinearAllocBlockRemainder(size);
    }
  }

  return res;
}

// A worst-case estimate of the space required (in HeapWords) to expand the
// heap when promoting an object of size obj_size.
size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  // Depending on the object size, expansion may require refilling either a
  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  // is added because the dictionary may over-allocate to avoid fragmentation.
  size_t space = obj_size;
  if (!_adaptive_freelists) {
    space = MAX2(space, _smallLinearAllocBlock._refillSize);
  }
  space += _promoInfo.refillSize() + 2 * MinChunkSize;
  return space;
}

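// Satisfy a request for "numWords" from a strictly larger free chunk:
// first search the indexed lists for a size of at least
// numWords + MinChunkSize (so that the remainder is itself a valid
// chunk), then fall back to the dictionary, splitting off and
// returning the remainder to the free lists.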
FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  FreeChunk* ret;

  assert(numWords >= MinChunkSize, "Size is less than minimum");
  assert(linearAllocationWouldFail() || bestFitFirst(),
    "Should not be here");

  size_t i;
  size_t currSize = numWords + MinChunkSize;
  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    FreeList* fl = &_indexedFreeList[i];
    if (fl->head()) {
      ret = getFromListGreater(fl, numWords);
      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
      return ret;
    }
  }

  currSize = MAX2((size_t)SmallForDictionary,
                  (size_t)(numWords + MinChunkSize));

  /* Try to get a chunk that satisfies request, while avoiding
     fragmentation that can't be handled. */
  {
    ret = dictionary()->getChunk(currSize);
    if (ret != NULL) {
      assert(ret->size() - numWords >= MinChunkSize,
        "Chunk is too small");
      _bt.allocated((HeapWord*)ret, ret->size());
      /* Carve returned chunk. */
      (void) splitChunkAndReturnRemainder(ret, numWords);
      /* Label this as no longer a free chunk. */
      assert(ret->isFree(), "This chunk should be free");
      ret->linkPrev(NULL);
    }
    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
    return ret;
  }
  ShouldNotReachHere();
}

bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  const {
  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
}

bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  if (fc->size() >= IndexSetSize) {
    return dictionary()->verifyChunkInFreeLists(fc);
  } else {
    return verifyChunkInIndexedFreeLists(fc);
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::assert_locked() const {
  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}
#endif

FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
  assert_locked();
  FreeChunk* fc;
  {
    // If GC is parallel, this might be called by several threads.
    // This should be rare enough that the locking overhead won't affect
    // the sequential code.
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    fc = getChunkFromDictionary(size);
  }
  if (fc != NULL) {
    fc->dontCoalesce();
    assert(fc->isFree(), "Should be free, but not coalescable");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  }
  return fc;
}

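// Promote obj into this space: if promotions are being tracked, first
// ensure spooling space for header preservation, then allocate, copy,
// and record the promoted object. Returns NULL on failure.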
1214 |
oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { |
1 | 1215 |
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1216 |
assert_locked(); |
|
1217 |
||
1218 |
// if we are tracking promotions, then first ensure space for |
|
1219 |
// promotion (including spooling space for saving header if necessary). |
|
1220 |
// then allocate and copy, then track promoted info if needed. |
|
1221 |
// When tracking (see PromotionInfo::track()), the mark word may |
|
1222 |
// be displaced and in this case restoration of the mark word |
|
1223 |
// occurs in the (oop_since_save_marks_)iterate phase. |
|
1224 |
if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { |
|
1225 |
return NULL; |
|
1226 |
} |
|
1227 |
// Call the allocate(size_t, bool) form directly to avoid the |
|
1228 |
// additional call through the allocate(size_t) form. Having |
|
1229 |
// the compiler inline the call is problematic because allocate(size_t)
|
1230 |
// is a virtual method. |
|
1231 |
HeapWord* res = allocate(adjustObjectSize(obj_size)); |
|
1232 |
if (res != NULL) { |
|
1233 |
Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); |
|
1234 |
// if we should be tracking promotions, do so. |
|
1235 |
if (_promoInfo.tracking()) { |
|
1236 |
_promoInfo.track((PromotedObject*)res); |
|
1237 |
} |
|
1238 |
} |
|
1239 |
return oop(res); |
|
1240 |
} |
|
1241 |
||
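// Illustrative sketch (not part of the original file): promote() above
// returns NULL either when spooling space cannot be reserved or when the
// allocation fails, so a hypothetical caller must be ready to trigger
// promotion-failure handling, e.g.:
//
//   oop new_obj = cfls->promote(old_obj, old_obj->size());
//   if (new_obj == NULL) {
//     // no space in the old gen: fall back to the collector's
//     // promotion-failure path
//   }
//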
1242 |
HeapWord* |
|
1243 |
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { |
|
1244 |
assert_locked(); |
|
1245 |
assert(size >= MinChunkSize, "minimum chunk size"); |
|
1246 |
assert(size < _smallLinearAllocBlock._allocation_size_limit, |
|
1247 |
"maximum from smallLinearAllocBlock"); |
|
1248 |
return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); |
|
1249 |
} |
|
1250 |
||
1251 |
HeapWord* |
|
1252 |
CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, |
|
1253 |
size_t size) { |
|
1254 |
assert_locked(); |
|
1255 |
assert(size >= MinChunkSize, "too small"); |
|
1256 |
HeapWord* res = NULL; |
|
1257 |
// Try to do linear allocation from blk, first checking that the block is non-empty.
|
1258 |
if (blk->_word_size == 0) { |
|
1259 |
// We have probably been unable to fill this either in the prologue or |
|
1260 |
// when it was exhausted at the last linear allocation. Bail out until |
|
1261 |
// next time. |
|
1262 |
assert(blk->_ptr == NULL, "consistency check"); |
|
1263 |
return NULL; |
|
1264 |
} |
|
1265 |
assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); |
|
1266 |
res = getChunkFromLinearAllocBlockRemainder(blk, size); |
|
1267 |
if (res != NULL) return res; |
|
1268 |
||
1269 |
// about to exhaust this linear allocation block |
|
1270 |
if (blk->_word_size == size) { // exactly satisfied |
|
1271 |
res = blk->_ptr; |
|
1272 |
_bt.allocated(res, blk->_word_size); |
|
1273 |
} else if (size + MinChunkSize <= blk->_refillSize) { |
|
1274 |
// Update _unallocated_block if the size is such that chunk would be |
|
1275 |
// returned to the indexed free list. All other chunks in the indexed |
|
1276 |
// free lists are allocated from the dictionary so that _unallocated_block |
|
1277 |
// has already been adjusted for them. Do it here so that the cost |
|
1278 |
// is incurred for all chunks added back to the indexed free lists.
|
1279 |
if (blk->_word_size < SmallForDictionary) { |
|
1280 |
_bt.allocated(blk->_ptr, blk->_word_size); |
|
1281 |
} |
|
1282 |
// Return the chunk that isn't big enough, and then refill below. |
|
1283 |
addChunkToFreeLists(blk->_ptr, blk->_word_size); |
|
1284 |
_bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size)); |
|
1285 |
// Don't keep statistics on adding back a chunk from a LinAB.
|
1286 |
} else { |
|
1287 |
// A refilled block would not satisfy the request. |
|
1288 |
return NULL; |
|
1289 |
} |
|
1290 |
||
1291 |
blk->_ptr = NULL; blk->_word_size = 0; |
|
1292 |
refillLinearAllocBlock(blk); |
|
1293 |
assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, |
|
1294 |
"block was replenished"); |
|
1295 |
if (res != NULL) { |
|
1296 |
splitBirth(size); |
|
1297 |
repairLinearAllocBlock(blk); |
|
1298 |
} else if (blk->_ptr != NULL) { |
|
1299 |
res = blk->_ptr; |
|
1300 |
size_t blk_size = blk->_word_size; |
|
1301 |
blk->_word_size -= size; |
|
1302 |
blk->_ptr += size; |
|
1303 |
splitBirth(size); |
|
1304 |
repairLinearAllocBlock(blk); |
|
1305 |
// Update BOT last so that other (parallel) GC threads see a consistent |
|
1306 |
// view of the BOT and free blocks. |
|
1307 |
// Above must occur before BOT is updated below. |
|
1308 |
_bt.split_block(res, blk_size, size); // adjust block offset table |
|
1309 |
} |
|
1310 |
return res; |
|
1311 |
} |
|
1312 |
||
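// Worked example (illustrative numbers, not from the source): if
// blk->_ptr == p and blk->_word_size == 100 and a 30-word chunk is
// requested, the carve-from-remainder path above returns res == p and
// shrinks the block to blk->_ptr == p + 30, blk->_word_size == 70. The
// BOT update via _bt.split_block(p, 100, 30) is deliberately done last
// so that parallel GC threads never observe the new boundary before the
// remainder has been marked as a free block.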
1313 |
HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( |
|
1314 |
LinearAllocBlock* blk, |
|
1315 |
size_t size) { |
|
1316 |
assert_locked(); |
|
1317 |
assert(size >= MinChunkSize, "too small"); |
|
1318 |
||
1319 |
HeapWord* res = NULL; |
|
1320 |
// This is the common case. Keep it simple. |
|
1321 |
if (blk->_word_size >= size + MinChunkSize) { |
|
1322 |
assert(blk->_ptr != NULL, "consistency check"); |
|
1323 |
res = blk->_ptr; |
|
1324 |
// Note that the BOT is up-to-date for the linAB before allocation. It |
|
1325 |
// indicates the start of the linAB. The split_block() updates the |
|
1326 |
// BOT for the linAB after the allocation (indicates the start of the |
|
1327 |
// next chunk to be allocated). |
|
1328 |
size_t blk_size = blk->_word_size; |
|
1329 |
blk->_word_size -= size; |
|
1330 |
blk->_ptr += size; |
|
1331 |
splitBirth(size); |
|
1332 |
repairLinearAllocBlock(blk); |
|
1333 |
// Update BOT last so that other (parallel) GC threads see a consistent |
|
1334 |
// view of the BOT and free blocks. |
|
1335 |
// Above must occur before BOT is updated below. |
|
1336 |
_bt.split_block(res, blk_size, size); // adjust block offset table |
|
1337 |
_bt.allocated(res, size); |
|
1338 |
} |
|
1339 |
return res; |
|
1340 |
} |
|
1341 |
||
1342 |
FreeChunk* |
|
1343 |
CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { |
|
1344 |
assert_locked(); |
|
1345 |
assert(size < SmallForDictionary, "just checking"); |
|
1346 |
FreeChunk* res; |
|
1347 |
res = _indexedFreeList[size].getChunkAtHead(); |
|
1348 |
if (res == NULL) { |
|
1349 |
res = getChunkFromIndexedFreeListHelper(size); |
|
1350 |
} |
|
1351 |
_bt.verify_not_unallocated((HeapWord*) res, size); |
|
1352 |
return res; |
|
1353 |
} |
|
1354 |
||
1355 |
FreeChunk* |
|
1356 |
CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) { |
|
1357 |
assert_locked(); |
|
1358 |
FreeChunk* fc = NULL; |
|
1359 |
if (size < SmallForDictionary) { |
|
1360 |
assert(_indexedFreeList[size].head() == NULL || |
|
1361 |
_indexedFreeList[size].surplus() <= 0, |
|
1362 |
"List for this size should be empty or under populated"); |
|
1363 |
// Try best fit in exact lists before replenishing the list |
|
1364 |
if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { |
|
1365 |
// Replenish list. |
|
1366 |
// |
|
1367 |
// Things tried that failed. |
|
1368 |
// Tried allocating out of the two LinAB's first before |
|
1369 |
// replenishing lists. |
|
1370 |
// Tried small linAB of size 256 (size in indexed list) |
|
1371 |
// and replenishing indexed lists from the small linAB. |
|
1372 |
// |
|
1373 |
FreeChunk* newFc = NULL; |
|
1374 |
size_t replenish_size = CMSIndexedFreeListReplenish * size; |
|
1375 |
if (replenish_size < SmallForDictionary) { |
|
1376 |
// Do not replenish from an underpopulated size. |
|
1377 |
if (_indexedFreeList[replenish_size].surplus() > 0 && |
|
1378 |
_indexedFreeList[replenish_size].head() != NULL) { |
|
1379 |
newFc = |
|
1380 |
_indexedFreeList[replenish_size].getChunkAtHead(); |
|
1381 |
} else { |
|
1382 |
newFc = bestFitSmall(replenish_size); |
|
1383 |
} |
|
1384 |
} |
|
1385 |
if (newFc != NULL) { |
|
1386 |
splitDeath(replenish_size); |
|
1387 |
} else if (replenish_size > size) { |
|
1388 |
assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); |
|
1389 |
newFc = |
|
1390 |
getChunkFromIndexedFreeListHelper(replenish_size); |
|
1391 |
} |
|
1392 |
if (newFc != NULL) { |
|
1393 |
assert(newFc->size() == replenish_size, "Got wrong size"); |
|
1394 |
size_t i; |
|
1395 |
FreeChunk *curFc, *nextFc; |
|
1396 |
// carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2 |
|
1397 |
// The last chunk is not added to the lists but is returned as the |
|
1398 |
// free chunk. |
|
1399 |
for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), |
|
1400 |
i = 0; |
|
1401 |
i < (CMSIndexedFreeListReplenish - 1); |
|
1402 |
curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), |
|
1403 |
i++) { |
|
1404 |
curFc->setSize(size); |
|
1405 |
// Don't record this as a return in order to try and |
|
1406 |
// determine the "returns" from a GC. |
|
1407 |
_bt.verify_not_unallocated((HeapWord*) curFc, size);
|
1408 |
_indexedFreeList[size].returnChunkAtTail(curFc, false); |
|
1409 |
_bt.mark_block((HeapWord*)curFc, size); |
|
1410 |
splitBirth(size); |
|
1411 |
// Don't record the initial population of the indexed list |
|
1412 |
// as a split birth. |
|
1413 |
} |
|
1414 |
||
1415 |
// check that the arithmetic was OK above |
|
1416 |
assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size, |
|
1417 |
"inconsistency in carving newFc"); |
|
1418 |
curFc->setSize(size); |
|
1419 |
_bt.mark_block((HeapWord*)curFc, size); |
|
1420 |
splitBirth(size); |
|
1421 |
return curFc; |
|
1422 |
} |
|
1423 |
} |
|
1424 |
} else { |
|
1425 |
// Get a free chunk from the free chunk dictionary to be returned to |
|
1426 |
// replenish the indexed free list. |
|
1427 |
fc = getChunkFromDictionaryExact(size); |
|
1428 |
} |
|
1429 |
assert(fc == NULL || fc->isFree(), "Should be returning a free chunk"); |
|
1430 |
return fc; |
|
1431 |
} |
|
1432 |
||
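// Worked example (illustrative numbers): for size == 8 with
// CMSIndexedFreeListReplenish == 4, replenish_size == 32. The helper
// above carves a 32-word chunk into four 8-word blocks at word offsets
// 0, 8, 16 and 24; the first three are appended to _indexedFreeList[8]
// and the last is returned to the caller, so a single dictionary (or
// larger-list) lookup both refills the exact-size list and satisfies
// the current request.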
1433 |
FreeChunk* |
|
1434 |
CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { |
|
1435 |
assert_locked(); |
|
1436 |
FreeChunk* fc = _dictionary->getChunk(size); |
|
1437 |
if (fc == NULL) { |
|
1438 |
return NULL; |
|
1439 |
} |
|
1440 |
_bt.allocated((HeapWord*)fc, fc->size()); |
|
1441 |
if (fc->size() >= size + MinChunkSize) { |
|
1442 |
fc = splitChunkAndReturnRemainder(fc, size); |
|
1443 |
} |
|
1444 |
assert(fc->size() >= size, "chunk too small"); |
|
1445 |
assert(fc->size() < size + MinChunkSize, "chunk too big"); |
|
1446 |
_bt.verify_single_block((HeapWord*)fc, fc->size()); |
|
1447 |
return fc; |
|
1448 |
} |
|
1449 |
||
1450 |
FreeChunk* |
|
1451 |
CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { |
|
1452 |
assert_locked(); |
|
1453 |
FreeChunk* fc = _dictionary->getChunk(size); |
|
1454 |
if (fc == NULL) { |
|
1455 |
return fc; |
|
1456 |
} |
|
1457 |
_bt.allocated((HeapWord*)fc, fc->size()); |
|
1458 |
if (fc->size() == size) { |
|
1459 |
_bt.verify_single_block((HeapWord*)fc, size); |
|
1460 |
return fc; |
|
1461 |
} |
|
1462 |
assert(fc->size() > size, "getChunk() guarantee"); |
|
1463 |
if (fc->size() < size + MinChunkSize) { |
|
1464 |
// Return the chunk to the dictionary and go get a bigger one. |
|
1465 |
returnChunkToDictionary(fc); |
|
1466 |
fc = _dictionary->getChunk(size + MinChunkSize); |
|
1467 |
if (fc == NULL) { |
|
1468 |
return NULL; |
|
1469 |
} |
|
1470 |
_bt.allocated((HeapWord*)fc, fc->size()); |
|
1471 |
} |
|
1472 |
assert(fc->size() >= size + MinChunkSize, "tautology"); |
|
1473 |
fc = splitChunkAndReturnRemainder(fc, size); |
|
1474 |
assert(fc->size() == size, "chunk is wrong size"); |
|
1475 |
_bt.verify_single_block((HeapWord*)fc, size); |
|
1476 |
return fc; |
|
1477 |
} |
|
1478 |
||
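// Worked example (illustrative, assuming MinChunkSize == 4): a request
// for 100 words that finds a 102-word dictionary chunk cannot split it
// (the 2-word remainder would be under MinChunkSize), so the chunk goes
// back to the dictionary and a chunk of at least 104 words is fetched;
// splitChunkAndReturnRemainder() then yields an exact 100-word result
// and a remainder of at least MinChunkSize words.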
1479 |
void |
|
1480 |
CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { |
|
1481 |
assert_locked(); |
|
1482 |
||
1483 |
size_t size = chunk->size(); |
|
1484 |
_bt.verify_single_block((HeapWord*)chunk, size); |
|
1485 |
// adjust _unallocated_block downward, as necessary |
|
1486 |
_bt.freed((HeapWord*)chunk, size); |
|
1487 |
_dictionary->returnChunk(chunk); |
|
1488 |
} |
|
1489 |
||
1490 |
void |
|
1491 |
CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { |
|
1492 |
assert_locked(); |
|
1493 |
size_t size = fc->size(); |
|
1494 |
_bt.verify_single_block((HeapWord*) fc, size); |
|
1495 |
_bt.verify_not_unallocated((HeapWord*) fc, size); |
|
1496 |
if (_adaptive_freelists) { |
|
1497 |
_indexedFreeList[size].returnChunkAtTail(fc); |
|
1498 |
} else { |
|
1499 |
_indexedFreeList[size].returnChunkAtHead(fc); |
|
1500 |
} |
|
1501 |
} |
|
1502 |
||
1503 |
// Add chunk to end of last block -- if it's the largest |
|
1504 |
// block -- and update BOT and census data. We would |
|
1505 |
// of course have preferred to coalesce it with the |
|
1506 |
// last block, but it's currently less expensive to find the |
|
1507 |
// largest block than it is to find the last. |
|
1508 |
void |
|
1509 |
CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( |
|
1510 |
HeapWord* chunk, size_t size) { |
|
1511 |
// check that the chunk does lie in this space! |
|
1512 |
assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); |
|
1513 |
assert_locked(); |
|
1514 |
// One of the parallel gc task threads may be here |
|
1515 |
// whilst others are allocating. |
|
1516 |
Mutex* lock = NULL; |
|
1517 |
if (ParallelGCThreads != 0) { |
|
1518 |
lock = &_parDictionaryAllocLock; |
|
1519 |
} |
|
1520 |
FreeChunk* ec; |
|
1521 |
{ |
|
1522 |
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
|
1523 |
ec = dictionary()->findLargestDict(); // get largest block |
|
1524 |
if (ec != NULL && ec->end() == chunk) { |
|
1525 |
// It's a coterminal block - we can coalesce. |
|
1526 |
size_t old_size = ec->size(); |
|
1527 |
coalDeath(old_size); |
|
1528 |
removeChunkFromDictionary(ec); |
|
1529 |
size += old_size; |
|
1530 |
} else { |
|
1531 |
ec = (FreeChunk*)chunk; |
|
1532 |
} |
|
1533 |
} |
|
1534 |
ec->setSize(size); |
|
1535 |
debug_only(ec->mangleFreed(size)); |
|
1536 |
if (size < SmallForDictionary) { |
|
1537 |
lock = _indexedFreeListParLocks[size]; |
|
1538 |
} |
|
1539 |
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
|
1540 |
addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); |
|
1541 |
// record the birth under the lock since the recording involves |
|
1542 |
// manipulation of the list on which the chunk lives and |
|
1543 |
// if the chunk is allocated and is the last on the list, |
|
1544 |
// the list can go away. |
|
1545 |
coalBirth(size); |
|
1546 |
} |
|
1547 |
||
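// Illustrative scenario (sketch only): if the largest dictionary block
// spans [a, a + n) and the incoming chunk begins exactly at a + n, the
// two are coterminal. The routine above removes the block, records a
// coalesce death for size n, and adds back a single block
// [a, a + n + size), so repeated additions at the end of the space do
// not strand a chain of small fragments there.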
1548 |
void |
|
1549 |
CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, |
|
1550 |
size_t size) { |
|
1551 |
// check that the chunk does lie in this space! |
|
1552 |
assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); |
|
1553 |
assert_locked(); |
|
1554 |
_bt.verify_single_block(chunk, size); |
|
1555 |
||
1556 |
FreeChunk* fc = (FreeChunk*) chunk; |
|
1557 |
fc->setSize(size); |
|
1558 |
debug_only(fc->mangleFreed(size)); |
|
1559 |
if (size < SmallForDictionary) { |
|
1560 |
returnChunkToFreeList(fc); |
|
1561 |
} else { |
|
1562 |
returnChunkToDictionary(fc); |
|
1563 |
} |
|
1564 |
} |
|
1565 |
||
1566 |
void |
|
1567 |
CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, |
|
1568 |
size_t size, bool coalesced) { |
|
1569 |
assert_locked(); |
|
1570 |
assert(chunk != NULL, "null chunk"); |
|
1571 |
if (coalesced) { |
|
1572 |
// repair BOT |
|
1573 |
_bt.single_block(chunk, size); |
|
1574 |
} |
|
1575 |
addChunkToFreeLists(chunk, size); |
|
1576 |
} |
|
1577 |
||
1578 |
// We _must_ find the purported chunk on our free lists; |
|
1579 |
// we assert if we don't. |
|
1580 |
void |
|
1581 |
CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { |
|
1582 |
size_t size = fc->size(); |
|
1583 |
assert_locked(); |
|
1584 |
debug_only(verifyFreeLists()); |
|
1585 |
if (size < SmallForDictionary) { |
|
1586 |
removeChunkFromIndexedFreeList(fc); |
|
1587 |
} else { |
|
1588 |
removeChunkFromDictionary(fc); |
|
1589 |
} |
|
1590 |
_bt.verify_single_block((HeapWord*)fc, size); |
|
1591 |
debug_only(verifyFreeLists()); |
|
1592 |
} |
|
1593 |
||
1594 |
void |
|
1595 |
CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { |
|
1596 |
size_t size = fc->size(); |
|
1597 |
assert_locked(); |
|
1598 |
assert(fc != NULL, "null chunk"); |
|
1599 |
_bt.verify_single_block((HeapWord*)fc, size); |
|
1600 |
_dictionary->removeChunk(fc); |
|
1601 |
// adjust _unallocated_block upward, as necessary |
|
1602 |
_bt.allocated((HeapWord*)fc, size); |
|
1603 |
} |
|
1604 |
||
1605 |
void |
|
1606 |
CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { |
|
1607 |
assert_locked(); |
|
1608 |
size_t size = fc->size(); |
|
1609 |
_bt.verify_single_block((HeapWord*)fc, size); |
|
1610 |
NOT_PRODUCT( |
|
1611 |
if (FLSVerifyIndexTable) { |
|
1612 |
verifyIndexedFreeList(size); |
|
1613 |
} |
|
1614 |
) |
|
1615 |
_indexedFreeList[size].removeChunk(fc); |
|
1616 |
debug_only(fc->clearNext()); |
|
1617 |
debug_only(fc->clearPrev()); |
|
1618 |
NOT_PRODUCT( |
|
1619 |
if (FLSVerifyIndexTable) { |
|
1620 |
verifyIndexedFreeList(size); |
|
1621 |
} |
|
1622 |
) |
|
1623 |
} |
|
1624 |
||
1625 |
FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { |
|
1626 |
/* A hint is the next larger size that has a surplus. |
|
1627 |
Start search at a size large enough to guarantee that |
|
1628 |
the excess is >= MinChunkSize. */
|
1629 |
size_t start = align_object_size(numWords + MinChunkSize); |
|
1630 |
if (start < IndexSetSize) { |
|
1631 |
FreeList* it = _indexedFreeList; |
|
1632 |
size_t hint = _indexedFreeList[start].hint(); |
|
1633 |
while (hint < IndexSetSize) { |
|
1634 |
assert(hint % MinObjAlignment == 0, "hint should be aligned"); |
|
1635 |
FreeList *fl = &_indexedFreeList[hint]; |
|
1636 |
if (fl->surplus() > 0 && fl->head() != NULL) { |
|
1637 |
// Found a list with surplus, reset original hint |
|
1638 |
// and split out a free chunk which is returned. |
|
1639 |
_indexedFreeList[start].set_hint(hint); |
|
1640 |
FreeChunk* res = getFromListGreater(fl, numWords); |
|
1641 |
assert(res == NULL || res->isFree(), |
|
1642 |
"Should be returning a free chunk"); |
|
1643 |
return res; |
|
1644 |
} |
|
1645 |
hint = fl->hint(); /* keep looking */ |
|
1646 |
} |
|
1647 |
/* None found. */ |
|
1648 |
it[start].set_hint(IndexSetSize); |
|
1649 |
} |
|
1650 |
return NULL; |
|
1651 |
} |
|
1652 |
||
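// Worked example (illustrative sizes): suppose only the lists for sizes
// 16 and 24 currently show a surplus. For start == 12, the walk above
// follows _indexedFreeList[12].hint() to 16, finds the surplus, caches
// 16 back into the hint at 12, and calls getFromListGreater() to split
// a 16-word chunk, so the caller receives numWords and the excess
// (guaranteed >= MinChunkSize by the choice of start) is returned to
// the free lists.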
1653 |
/* Requires fl->size >= numWords + MinChunkSize */ |
|
1654 |
FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, |
|
1655 |
size_t numWords) { |
|
1656 |
FreeChunk *curr = fl->head();
|
1657 |
assert(curr != NULL, "List is empty");
|
1658 |
size_t oldNumWords = curr->size();
|
1659 |
assert(numWords >= MinChunkSize, "Word size is too small");
|
1660 |
assert(oldNumWords >= numWords + MinChunkSize, |
|
1661 |
"Size of chunks in the list is too small"); |
|
1662 |
||
1663 |
fl->removeChunk(curr); |
|
1664 |
// recorded indirectly by splitChunkAndReturnRemainder - |
|
1665 |
// smallSplit(oldNumWords, numWords); |
|
1666 |
FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); |
|
1667 |
// Does anything have to be done for the remainder in terms of |
|
1668 |
// fixing the card table? |
|
1669 |
assert(new_chunk == NULL || new_chunk->isFree(), |
|
1670 |
"Should be returning a free chunk"); |
|
1671 |
return new_chunk; |
|
1672 |
} |
|
1673 |
||
1674 |
FreeChunk* |
|
1675 |
CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, |
|
1676 |
size_t new_size) { |
|
1677 |
assert_locked(); |
|
1678 |
size_t size = chunk->size(); |
|
1679 |
assert(size > new_size, "Split from a smaller block?"); |
|
1680 |
assert(is_aligned(chunk), "alignment problem"); |
|
1681 |
assert(size == adjustObjectSize(size), "alignment problem"); |
|
1682 |
size_t rem_size = size - new_size; |
|
1683 |
assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); |
|
1684 |
assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); |
|
1685 |
FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); |
|
1686 |
assert(is_aligned(ffc), "alignment problem"); |
|
1687 |
ffc->setSize(rem_size); |
|
1688 |
ffc->linkNext(NULL); |
|
1689 |
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. |
|
1690 |
// Above must occur before BOT is updated below. |
|
1691 |
// adjust block offset table |
|
1692 |
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size); |
|
1693 |
if (rem_size < SmallForDictionary) { |
|
1694 |
bool is_par = (SharedHeap::heap()->n_par_threads() > 0); |
|
1695 |
if (is_par) _indexedFreeListParLocks[rem_size]->lock(); |
|
1696 |
returnChunkToFreeList(ffc); |
|
1697 |
split(size, rem_size); |
|
1698 |
if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); |
|
1699 |
} else { |
|
1700 |
returnChunkToDictionary(ffc); |
|
1701 |
split(size, rem_size);
|
1702 |
} |
|
1703 |
chunk->setSize(new_size); |
|
1704 |
return chunk; |
|
1705 |
} |
|
1706 |
||
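// Worked example (illustrative numbers): splitting a 100-word chunk at
// new_size == 40 places the remainder header at chunk + 40 with size 60
// and links it as free *before* _bt.split_block() publishes the new
// boundary; the 60-word remainder then goes to _indexedFreeList[60]
// (taking the per-list lock when GC threads run in parallel) or to the
// dictionary, and split(100, 60) updates the census.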
1707 |
void |
|
1708 |
CompactibleFreeListSpace::sweep_completed() { |
|
1709 |
// Now that space is probably plentiful, refill linear |
|
1710 |
// allocation blocks as needed. |
|
1711 |
refillLinearAllocBlocksIfNeeded(); |
|
1712 |
} |
|
1713 |
||
1714 |
void |
|
1715 |
CompactibleFreeListSpace::gc_prologue() { |
|
1716 |
assert_locked(); |
|
1717 |
if (PrintFLSStatistics != 0) { |
|
1718 |
gclog_or_tty->print("Before GC:\n"); |
|
1719 |
reportFreeListStatistics(); |
|
1720 |
} |
|
1721 |
refillLinearAllocBlocksIfNeeded(); |
|
1722 |
} |
|
1723 |
||
1724 |
void |
|
1725 |
CompactibleFreeListSpace::gc_epilogue() { |
|
1726 |
assert_locked(); |
|
1727 |
if (PrintGCDetails && Verbose && !_adaptive_freelists) { |
|
1728 |
if (_smallLinearAllocBlock._word_size == 0) |
|
1729 |
warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); |
|
1730 |
} |
|
1731 |
assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); |
|
1732 |
_promoInfo.stopTrackingPromotions(); |
|
1733 |
repairLinearAllocationBlocks(); |
|
1734 |
// Print Space's stats |
|
1735 |
if (PrintFLSStatistics != 0) { |
|
1736 |
gclog_or_tty->print("After GC:\n"); |
|
1737 |
reportFreeListStatistics(); |
|
1738 |
} |
|
1739 |
} |
|
1740 |
||
1741 |
// Iteration support, mostly delegated from a CMS generation |
|
1742 |
||
1743 |
void CompactibleFreeListSpace::save_marks() { |
|
1744 |
// mark the "end" of the used space at the time of this call; |
|
1745 |
// note, however, that promoted objects from this point |
|
1746 |
// on are tracked in the _promoInfo below. |
|
1747 |
set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? |
|
1748 |
unallocated_block() : end()); |
|
1749 |
// inform allocator that promotions should be tracked. |
|
1750 |
assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); |
|
1751 |
_promoInfo.startTrackingPromotions(); |
|
1752 |
} |
|
1753 |
||
1754 |
bool CompactibleFreeListSpace::no_allocs_since_save_marks() { |
|
1755 |
assert(_promoInfo.tracking(), "No preceding save_marks?"); |
|
1756 |
guarantee(SharedHeap::heap()->n_par_threads() == 0, |
|
1757 |
"Shouldn't be called (yet) during parallel part of gc."); |
|
1758 |
return _promoInfo.noPromotions(); |
|
1759 |
} |
|
1760 |
||
1761 |
#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ |
|
1762 |
\ |
|
1763 |
void CompactibleFreeListSpace:: \ |
|
1764 |
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ |
|
1765 |
assert(SharedHeap::heap()->n_par_threads() == 0, \ |
|
1766 |
"Shouldn't be called (yet) during parallel part of gc."); \ |
|
1767 |
_promoInfo.promoted_oops_iterate##nv_suffix(blk); \ |
|
1768 |
/* \ |
|
1769 |
* This also restores any displaced headers and removes the elements from \ |
|
1770 |
* the iteration set as they are processed, so that we have a clean slate \ |
|
1771 |
* at the end of the iteration. Note, thus, that if new objects are \ |
|
1772 |
* promoted as a result of the iteration they are iterated over as well. \ |
|
1773 |
*/ \ |
|
1774 |
assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ |
|
1775 |
} |
|
1776 |
||
1777 |
ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) |
|
1778 |
||
1779 |
////////////////////////////////////////////////////////////////////////////// |
|
1780 |
// We go over the list of promoted objects, removing each from the list, |
|
1781 |
// and applying the closure (this may, in turn, add more elements to |
|
1782 |
// the tail of the promoted list, and these newly added objects will |
|
1783 |
// also be processed) until the list is empty. |
|
1784 |
// To aid verification and debugging, in the non-product builds |
|
1785 |
// we actually forward _promoHead each time we process a promoted oop. |
|
1786 |
// Note that this is not necessary in general (i.e. when we don't need to |
|
1787 |
// call PromotionInfo::verify()) because oop_iterate can only add to the |
|
1788 |
// end of _promoTail, and never needs to look at _promoHead. |
|
1789 |
||
1790 |
#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
|
1791 |
\ |
|
1792 |
void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \ |
|
1793 |
NOT_PRODUCT(verify()); \ |
|
1794 |
PromotedObject *curObj, *nextObj; \ |
|
1795 |
for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \ |
|
1796 |
if ((nextObj = curObj->next()) == NULL) { \ |
|
1797 |
/* protect ourselves against additions due to closure application \ |
|
1798 |
below by resetting the list. */ \ |
|
1799 |
assert(_promoTail == curObj, "Should have been the tail"); \ |
|
1800 |
_promoHead = _promoTail = NULL; \ |
|
1801 |
} \ |
|
1802 |
if (curObj->hasDisplacedMark()) { \ |
|
1803 |
/* restore displaced header */ \ |
|
1804 |
oop(curObj)->set_mark(nextDisplacedHeader()); \ |
|
1805 |
} else { \ |
|
1806 |
/* restore prototypical header */ \ |
|
1807 |
oop(curObj)->init_mark(); \ |
|
1808 |
} \ |
|
1809 |
/* The "promoted_mark" should now not be set */ \ |
|
1810 |
assert(!curObj->hasPromotedMark(), \ |
|
1811 |
"Should have been cleared by restoring displaced mark-word"); \ |
|
1812 |
NOT_PRODUCT(_promoHead = nextObj); \ |
|
1813 |
if (cl != NULL) oop(curObj)->oop_iterate(cl); \ |
|
1814 |
if (nextObj == NULL) { /* start at head of list reset above */ \ |
|
1815 |
nextObj = _promoHead; \ |
|
1816 |
} \ |
|
1817 |
} \ |
|
1818 |
assert(noPromotions(), "post-condition violation"); \ |
|
1819 |
assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\ |
|
1820 |
assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \ |
|
1821 |
assert(_firstIndex == _nextIndex, "empty buffer"); \ |
|
1822 |
} |
|
1823 |
||
1824 |
// This should have been ALL_SINCE_...() just like the others, |
|
1825 |
// but, because the body of the method above is somewhat longer,
|
1826 |
// the MSVC compiler cannot cope; as a workaround, we split the |
|
1827 |
// macro into its 3 constituent parts below (see original macro |
|
1828 |
// definition in specializedOopClosures.hpp). |
|
1829 |
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN) |
|
1830 |
PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v) |
|
1831 |
||
1832 |
||
1833 |
void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { |
|
1834 |
// ugghh... how would one do this efficiently for a non-contiguous space? |
|
1835 |
guarantee(false, "NYI"); |
|
1836 |
} |
|
1837 |
||
1838 |
bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
1839 |
return _smallLinearAllocBlock._word_size == 0; |
1840 |
} |
|
1841 |
||
1842 |
void CompactibleFreeListSpace::repairLinearAllocationBlocks() { |
|
1843 |
// Fix up linear allocation blocks to look like free blocks |
|
1844 |
repairLinearAllocBlock(&_smallLinearAllocBlock); |
|
1845 |
} |
|
1846 |
||
1847 |
void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { |
|
1848 |
assert_locked(); |
|
1849 |
if (blk->_ptr != NULL) { |
|
1850 |
assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, |
|
1851 |
"Minimum block size requirement"); |
|
1852 |
FreeChunk* fc = (FreeChunk*)(blk->_ptr); |
|
1853 |
fc->setSize(blk->_word_size); |
|
1854 |
fc->linkPrev(NULL); // mark as free |
|
1855 |
fc->dontCoalesce(); |
|
1856 |
assert(fc->isFree(), "just marked it free"); |
|
1857 |
assert(fc->cantCoalesce(), "just marked it uncoalescable"); |
|
1858 |
} |
|
1859 |
} |
|
1860 |
||
1861 |
void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { |
|
1862 |
assert_locked(); |
|
1863 |
if (_smallLinearAllocBlock._ptr == NULL) { |
|
1864 |
assert(_smallLinearAllocBlock._word_size == 0, |
|
1865 |
"Size of linAB should be zero if the ptr is NULL"); |
|
1866 |
// Reset the linAB refill and allocation size limit. |
|
1867 |
_smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); |
|
1868 |
} |
|
1869 |
refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); |
|
1870 |
} |
|
1871 |
||
1872 |
void |
|
1873 |
CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { |
|
1874 |
assert_locked(); |
|
1875 |
assert((blk->_ptr == NULL && blk->_word_size == 0) || |
|
1876 |
(blk->_ptr != NULL && blk->_word_size >= MinChunkSize), |
|
1877 |
"blk invariant"); |
|
1878 |
if (blk->_ptr == NULL) { |
|
1879 |
refillLinearAllocBlock(blk); |
|
1880 |
} |
|
1881 |
if (PrintMiscellaneous && Verbose) { |
|
1882 |
if (blk->_word_size == 0) { |
|
1883 |
warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); |
|
1884 |
} |
|
1885 |
} |
|
1886 |
} |
|
1887 |
||
1888 |
void |
|
1889 |
CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { |
|
1890 |
assert_locked(); |
|
1891 |
assert(blk->_word_size == 0 && blk->_ptr == NULL, |
|
1892 |
"linear allocation block should be empty"); |
|
1893 |
FreeChunk* fc; |
|
1894 |
if (blk->_refillSize < SmallForDictionary && |
|
1895 |
(fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { |
|
1896 |
// A linAB's strategy might be to use small sizes to reduce |
|
1897 |
// fragmentation but still get the benefits of allocation from a |
|
1898 |
// linAB. |
|
1899 |
} else { |
|
1900 |
fc = getChunkFromDictionary(blk->_refillSize); |
|
1901 |
} |
|
1902 |
if (fc != NULL) { |
|
1903 |
blk->_ptr = (HeapWord*)fc; |
|
1904 |
blk->_word_size = fc->size(); |
|
1905 |
fc->dontCoalesce(); // to prevent sweeper from sweeping us up |
|
1906 |
} |
|
1907 |
} |
|
1908 |
||
1909 |
// Support for concurrent collection policy decisions.
|
1910 |
bool CompactibleFreeListSpace::should_concurrent_collect() const {
|
1911 |
// In the future we might want to add in fragmentation stats --
|
1912 |
// including erosion of the "mountain" into this decision as well.
|
1913 |
return !adaptive_freelists() && linearAllocationWouldFail();
|
1914 |
}
|
1915 |
|
1916 |
// Support for compaction |
1917 |
||
1918 |
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { |
|
1919 |
SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); |
|
1920 |
// prepare_for_compaction() uses the space between live objects |
|
1921 |
// so that a later phase can skip dead space quickly. As a result,
|
1922 |
// verification of the free lists doesn't work afterwards.
|
1923 |
} |
|
1924 |
||
1925 |
#define obj_size(q) adjustObjectSize(oop(q)->size()) |
|
1926 |
#define adjust_obj_size(s) adjustObjectSize(s) |
|
1927 |
||
1928 |
void CompactibleFreeListSpace::adjust_pointers() { |
|
1929 |
// In other versions of adjust_pointers(), a bail out |
|
1930 |
// based on the amount of live data in the generation |
|
1931 |
// (i.e., if 0, bail out) may be used. |
|
1932 |
// Cannot test used() == 0 here because the free lists have already |
|
1933 |
// been mangled by the compaction. |
|
1934 |
||
1935 |
SCAN_AND_ADJUST_POINTERS(adjust_obj_size); |
|
1936 |
// See note about verification in prepare_for_compaction(). |
|
1937 |
} |
|
1938 |
||
1939 |
void CompactibleFreeListSpace::compact() { |
|
1940 |
SCAN_AND_COMPACT(obj_size); |
|
1941 |
} |
|
1942 |
||
1943 |
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] |
|
1944 |
// where fbs are the free block sizes
|
1945 |
double CompactibleFreeListSpace::flsFrag() const { |
|
1946 |
size_t itabFree = totalSizeInIndexedFreeLists(); |
|
1947 |
double frag = 0.0; |
|
1948 |
size_t i; |
|
1949 |
||
1950 |
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
|
1951 |
double sz = i; |
|
1952 |
frag += _indexedFreeList[i].count() * (sz * sz); |
|
1953 |
} |
|
1954 |
||
1955 |
double totFree = itabFree + |
|
1956 |
_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); |
|
1957 |
if (totFree > 0) { |
|
1958 |
frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / |
|
1959 |
(totFree * totFree)); |
|
1960 |
frag = (double)1.0 - frag; |
|
1961 |
} else { |
|
1962 |
assert(frag == 0.0, "Follows from totFree == 0"); |
|
1963 |
} |
|
1964 |
return frag; |
|
1965 |
} |
|
1966 |
||
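// Worked example (illustrative numbers): free blocks of sizes {2, 2, 4}
// give sum(fbs) == 8 and sum(fbs^2) == 4 + 4 + 16 == 24, so the metric
// is 1 - 24/64 == 0.625, while a single 8-word free block scores
// 1 - 64/64 == 0 (no fragmentation). A minimal stand-alone sketch of
// the same computation (hypothetical helper, not used by the VM):
//
//   static double fls_frag_example(const size_t* fbs, size_t n) {
//     double sum = 0.0, sum_sq = 0.0;
//     for (size_t i = 0; i < n; i++) {
//       sum    += (double)fbs[i];
//       sum_sq += (double)fbs[i] * (double)fbs[i];
//     }
//     return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
//   }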
1967 |
#define CoalSurplusPercent 1.05 |
|
1968 |
#define SplitSurplusPercent 1.10 |
|
1969 |
||
1970 |
void CompactibleFreeListSpace::beginSweepFLCensus( |
|
1971 |
float inter_sweep_current, |
|
1972 |
float inter_sweep_estimate) { |
|
1973 |
assert_locked(); |
|
1974 |
size_t i; |
|
1975 |
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
|
1976 |
FreeList* fl = &_indexedFreeList[i]; |
|
1977 |
fl->compute_desired(inter_sweep_current, inter_sweep_estimate); |
|
1978 |
fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent)); |
|
1979 |
fl->set_beforeSweep(fl->count()); |
|
1980 |
fl->set_bfrSurp(fl->surplus()); |
|
1981 |
} |
|
1982 |
_dictionary->beginSweepDictCensus(CoalSurplusPercent, |
|
1983 |
inter_sweep_current, |
|
1984 |
inter_sweep_estimate); |
|
1985 |
} |
|
1986 |
||
1987 |
void CompactibleFreeListSpace::setFLSurplus() { |
|
1988 |
assert_locked(); |
|
1989 |
size_t i; |
|
1990 |
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
|
1991 |
FreeList *fl = &_indexedFreeList[i]; |
|
1992 |
fl->set_surplus(fl->count() - |
|
1993 |
(ssize_t)((double)fl->desired() * SplitSurplusPercent)); |
|
1994 |
} |
|
1995 |
} |
|
1996 |
||
1997 |
void CompactibleFreeListSpace::setFLHints() { |
|
1998 |
assert_locked(); |
|
1999 |
size_t i; |
|
2000 |
size_t h = IndexSetSize; |
|
2001 |
for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { |
|
2002 |
FreeList *fl = &_indexedFreeList[i]; |
|
2003 |
fl->set_hint(h); |
|
2004 |
if (fl->surplus() > 0) { |
|
2005 |
h = i; |
|
2006 |
} |
|
2007 |
} |
|
2008 |
} |
|
2009 |
||
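// Worked example (illustrative sizes, ignoring IndexSetStride): with a
// surplus only at sizes 10 and 16, the downward scan above leaves
// hint == IndexSetSize for every list of size >= 16, hint == 16 for
// sizes 10..15, and hint == 10 for everything smaller -- i.e. each
// list's hint names the next strictly larger size with a surplus worth
// stealing from.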
2010 |
void CompactibleFreeListSpace::clearFLCensus() { |
|
2011 |
assert_locked(); |
|
2012 |
int i; |
|
2013 |
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
|
2014 |
FreeList *fl = &_indexedFreeList[i]; |
|
2015 |
fl->set_prevSweep(fl->count()); |
|
2016 |
fl->set_coalBirths(0); |
|
2017 |
fl->set_coalDeaths(0); |
|
2018 |
fl->set_splitBirths(0); |
|
2019 |
fl->set_splitDeaths(0); |
|
2020 |
} |
|
2021 |
} |
|
2022 |
||
2023 |
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { |
2024 |
setFLSurplus(); |
2025 |
setFLHints(); |
|
2026 |
if (PrintGC && PrintFLSCensus > 0) { |
|
2027 |
printFLCensus(sweep_count); |
2028 |
} |
2029 |
clearFLCensus(); |
|
2030 |
assert_locked(); |
|
2031 |
_dictionary->endSweepDictCensus(SplitSurplusPercent); |
|
2032 |
} |
|
2033 |
||
2034 |
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { |
|
2035 |
if (size < SmallForDictionary) { |
|
2036 |
FreeList *fl = &_indexedFreeList[size]; |
|
2037 |
return (fl->coalDesired() < 0) || |
|
2038 |
((int)fl->count() > fl->coalDesired()); |
|
2039 |
} else { |
|
2040 |
return dictionary()->coalDictOverPopulated(size); |
|
2041 |
} |
|
2042 |
} |
|
2043 |
||
2044 |
void CompactibleFreeListSpace::smallCoalBirth(size_t size) { |
|
2045 |
assert(size < SmallForDictionary, "Size too large for indexed list"); |
|
2046 |
FreeList *fl = &_indexedFreeList[size]; |
|
2047 |
fl->increment_coalBirths(); |
|
2048 |
fl->increment_surplus(); |
|
2049 |
} |
|
2050 |
||
2051 |
void CompactibleFreeListSpace::smallCoalDeath(size_t size) { |
|
2052 |
assert(size < SmallForDictionary, "Size too large for indexed list"); |
|
2053 |
FreeList *fl = &_indexedFreeList[size]; |
|
2054 |
fl->increment_coalDeaths(); |
|
2055 |
fl->decrement_surplus(); |
|
2056 |
} |
|
2057 |
||
2058 |
void CompactibleFreeListSpace::coalBirth(size_t size) { |
|
2059 |
if (size < SmallForDictionary) { |
|
2060 |
smallCoalBirth(size); |
|
2061 |
} else { |
|
2062 |
dictionary()->dictCensusUpdate(size, |
|
2063 |
false /* split */, |
|
2064 |
true /* birth */); |
|
2065 |
} |
|
2066 |
} |
|
2067 |
||
2068 |
void CompactibleFreeListSpace::coalDeath(size_t size) { |
|
2069 |
if (size < SmallForDictionary) {
|
2070 |
smallCoalDeath(size); |
|
2071 |
} else { |
|
2072 |
dictionary()->dictCensusUpdate(size, |
|
2073 |
false /* split */, |
|
2074 |
false /* birth */); |
|
2075 |
} |
|
2076 |
} |
|
2077 |
||
2078 |
void CompactibleFreeListSpace::smallSplitBirth(size_t size) { |
|
2079 |
assert(size < SmallForDictionary, "Size too large for indexed list"); |
|
2080 |
FreeList *fl = &_indexedFreeList[size]; |
|
2081 |
fl->increment_splitBirths(); |
|
2082 |
fl->increment_surplus(); |
|
2083 |
} |
|
2084 |
||
2085 |
void CompactibleFreeListSpace::smallSplitDeath(size_t size) { |
|
2086 |
assert(size < SmallForDictionary, "Size too large for indexed list"); |
|
2087 |
FreeList *fl = &_indexedFreeList[size]; |
|
2088 |
fl->increment_splitDeaths(); |
|
2089 |
fl->decrement_surplus(); |
|
2090 |
} |
|
2091 |
||
2092 |
void CompactibleFreeListSpace::splitBirth(size_t size) { |
|
2093 |
if (size < SmallForDictionary) { |
|
2094 |
smallSplitBirth(size); |
|
2095 |
} else { |
|
2096 |
dictionary()->dictCensusUpdate(size, |
|
2097 |
true /* split */, |
|
2098 |
true /* birth */); |
|
2099 |
} |
|
2100 |
} |
|
2101 |
||
2102 |
void CompactibleFreeListSpace::splitDeath(size_t size) { |
|
2103 |
if (size < SmallForDictionary) { |
|
2104 |
smallSplitDeath(size); |
|
2105 |
} else { |
|
2106 |
dictionary()->dictCensusUpdate(size, |
|
2107 |
true /* split */, |
|
2108 |
false /* birth */); |
|
2109 |
} |
|
2110 |
} |
|
2111 |
||
2112 |
void CompactibleFreeListSpace::split(size_t from, size_t to1) { |
|
2113 |
size_t to2 = from - to1; |
|
2114 |
splitDeath(from); |
|
2115 |
splitBirth(to1); |
|
2116 |
splitBirth(to2); |
|
2117 |
} |
|
2118 |
||
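// Worked example (illustrative numbers): split(100, 40) records a split
// death at size 100 and split births at sizes 40 and 60, keeping the
// per-size census consistent with the physical carve performed in
// splitChunkAndReturnRemainder().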
2119 |
void CompactibleFreeListSpace::print() const { |
|
2120 |
tty->print(" CompactibleFreeListSpace"); |
|
2121 |
Space::print(); |
|
2122 |
} |
|
2123 |
||
2124 |
void CompactibleFreeListSpace::prepare_for_verify() { |
|
2125 |
assert_locked(); |
|
2126 |
repairLinearAllocationBlocks(); |
|
2127 |
// Verify that the SpoolBlocks look like free blocks of |
|
2128 |
// appropriate sizes... To be done ... |
|
2129 |
} |
|
2130 |
||
2131 |
class VerifyAllBlksClosure: public BlkClosure { |
|
2132 |
private: |
2133 |
const CompactibleFreeListSpace* _sp; |
2134 |
const MemRegion _span; |
|
2135 |
||
2136 |
public: |
|
2137 |
VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, |
|
2138 |
MemRegion span) : _sp(sp), _span(span) { } |
|
2139 |
||
2140 |
virtual size_t do_blk(HeapWord* addr) { |
2141 |
size_t res; |
2142 |
if (_sp->block_is_obj(addr)) { |
|
2143 |
oop p = oop(addr); |
|
2144 |
guarantee(p->is_oop(), "Should be an oop"); |
|
2145 |
res = _sp->adjustObjectSize(p->size()); |
|
2146 |
if (_sp->obj_is_alive(addr)) { |
|
2147 |
p->verify(); |
|
2148 |
} |
|
2149 |
} else { |
|
2150 |
FreeChunk* fc = (FreeChunk*)addr; |
|
2151 |
res = fc->size(); |
|
2152 |
if (FLSVerifyLists && !fc->cantCoalesce()) { |
|
2153 |
guarantee(_sp->verifyChunkInFreeLists(fc), |
|
2154 |
"Chunk should be on a free list"); |
|
2155 |
} |
|
2156 |
} |
|
2157 |
guarantee(res != 0, "Livelock: no rank reduction!"); |
|
2158 |
return res; |
|
2159 |
} |
|
2160 |
}; |
|
2161 |
||
2162 |
class VerifyAllOopsClosure: public OopClosure { |
|
2163 |
private: |
2164 |
const CMSCollector* _collector; |
2165 |
const CompactibleFreeListSpace* _sp; |
|
2166 |
const MemRegion _span; |
|
2167 |
const bool _past_remark; |
|
2168 |
const CMSBitMap* _bit_map; |
|
2169 |
||
2170 |
protected:
|
2171 |
void do_oop(void* p, oop obj) {
|
2172 |
if (_span.contains(obj)) { // the interior oop points into CMS heap
|
2173 |
if (!_span.contains(p)) { // reference from outside CMS heap
|
2174 |
// Should be a valid object; the first disjunct below allows
|
2175 |
// us to sidestep an assertion in block_is_obj() that insists
|
2176 |
// that p be in _sp. Note that several generations (and spaces)
|
2177 |
// are spanned by _span (CMS heap) above.
|
2178 |
guarantee(!_sp->is_in_reserved(obj) ||
|
2179 |
_sp->block_is_obj((HeapWord*)obj),
|
2180 |
"Should be an object");
|
2181 |
guarantee(obj->is_oop(), "Should be an oop");
|
2182 |
obj->verify();
|
2183 |
if (_past_remark) {
|
2184 |
// Remark has been completed, the object should be marked
|
2185 |
_bit_map->isMarked((HeapWord*)obj);
|
2186 |
}
|
2187 |
} else { // reference within CMS heap
|
2188 |
if (_past_remark) {
|
2189 |
// Remark has been completed -- so the referent should have
|
2190 |
// been marked, if referring object is.
|
2191 |
if (_bit_map->isMarked(_collector->block_start(p))) {
|
2192 |
guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
|
2193 |
}
|
2194 |
}
|
2195 |
}
|
2196 |
} else if (_sp->is_in_reserved(p)) {
|
2197 |
// the reference is from FLS, and points out of FLS
|
2198 |
guarantee(obj->is_oop(), "Should be an oop");
|
2199 |
obj->verify();
|
2200 |
}
|
2201 |
}
|
2202 |
||
2203 |
template <class T> void do_oop_work(T* p) {
|
2204 |
T heap_oop = oopDesc::load_heap_oop(p);
|
2205 |
if (!oopDesc::is_null(heap_oop)) {
|
2206 |
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
2207 |
do_oop(p, obj);
|
2208 |
}
|
2209 |
}
|
2210 |
||
2211 |
public: |
2212 |
VerifyAllOopsClosure(const CMSCollector* collector, |
|
2213 |
const CompactibleFreeListSpace* sp, MemRegion span, |
|
2214 |
bool past_remark, CMSBitMap* bit_map) : |
|
2215 |
OopClosure(), _collector(collector), _sp(sp), _span(span), |
|
2216 |
_past_remark(past_remark), _bit_map(bit_map) { } |
|
2217 |
||
2218 |
virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
|
2219 |
virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
2220 |
}; |
2221 |
||
2222 |
void CompactibleFreeListSpace::verify(bool ignored) const { |
|
2223 |
assert_lock_strong(&_freelistLock); |
|
2224 |
verify_objects_initialized(); |
|
2225 |
MemRegion span = _collector->_span; |
|
2226 |
bool past_remark = (_collector->abstract_state() == |
|
2227 |
CMSCollector::Sweeping); |
|
2228 |
||
2229 |
ResourceMark rm; |
|
2230 |
HandleMark hm; |
|
2231 |
||
2232 |
// Check integrity of CFL data structures |
|
2233 |
_promoInfo.verify(); |
|
2234 |
_dictionary->verify(); |
|
2235 |
if (FLSVerifyIndexTable) { |
|
2236 |
verifyIndexedFreeLists(); |
|
2237 |
} |
|
2238 |
// Check integrity of all objects and free blocks in space |
|
2239 |
{ |
|
2240 |
VerifyAllBlksClosure cl(this, span); |
|
2241 |
((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const |
|
2242 |
} |
|
2243 |
// Check that all references in the heap to FLS |
|
2244 |
// are to valid objects in FLS or that references in |
|
2245 |
// FLS are to valid objects elsewhere in the heap |
|
2246 |
if (FLSVerifyAllHeapReferences) |
|
2247 |
{ |
|
2248 |
VerifyAllOopsClosure cl(_collector, this, span, past_remark, |
|
2249 |
_collector->markBitMap()); |
|
2250 |
CollectedHeap* ch = Universe::heap(); |
|
2251 |
ch->oop_iterate(&cl); // all oops in generations |
|
2252 |
ch->permanent_oop_iterate(&cl); // all oops in perm gen |
|
2253 |
} |
|
2254 |
||
2255 |
if (VerifyObjectStartArray) { |
|
2256 |
// Verify the block offset table |
|
2257 |
_bt.verify(); |
|
2258 |
} |
|
2259 |
} |
|
2260 |
||
2261 |
#ifndef PRODUCT |
|
2262 |
void CompactibleFreeListSpace::verifyFreeLists() const { |
|
2263 |
if (FLSVerifyLists) { |
|
2264 |
_dictionary->verify(); |
|
2265 |
verifyIndexedFreeLists(); |
|
2266 |
} else { |
|
2267 |
if (FLSVerifyDictionary) { |
|
2268 |
_dictionary->verify(); |
|
2269 |
} |
|
2270 |
if (FLSVerifyIndexTable) { |
|
2271 |
verifyIndexedFreeLists(); |
|
2272 |
} |
|
2273 |
} |
|
2274 |
} |
|
2275 |
#endif |
|
2276 |
||
2277 |
void CompactibleFreeListSpace::verifyIndexedFreeLists() const { |
|
2278 |
size_t i = 0; |
|
2279 |
for (; i < MinChunkSize; i++) { |
|
2280 |
guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); |
|
2281 |
} |
|
2282 |
for (; i < IndexSetSize; i++) { |
|
2283 |
verifyIndexedFreeList(i); |
|
2284 |
} |
|
2285 |
} |
|
2286 |
||
2287 |
void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { |
|
2288 |
guarantee(size % 2 == 0, "Odd slots should be empty"); |
|
2289 |
for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL; |
|
2290 |
fc = fc->next()) { |
|
2291 |
guarantee(fc->size() == size, "Size inconsistency"); |
|
2292 |
guarantee(fc->isFree(), "!free?"); |
|
2293 |
guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); |
|
2294 |
} |
|
2295 |
} |
|
2296 |
||
2297 |
#ifndef PRODUCT |
|
2298 |
void CompactibleFreeListSpace::checkFreeListConsistency() const { |
|
2299 |
assert(_dictionary->minSize() <= IndexSetSize, |
|
2300 |
"Some sizes can't be allocated without recourse to" |
|
2301 |
" linear allocation buffers"); |
|
2302 |
assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), |
|
2303 |
"else MIN_TREE_CHUNK_SIZE is wrong"); |
|
2304 |
assert((IndexSetStride == 2 && IndexSetStart == 2) || |
|
2305 |
(IndexSetStride == 1 && IndexSetStart == 1), "just checking"); |
|
2306 |
assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0), |
|
2307 |
"Some for-loops may be incorrectly initialized"); |
|
2308 |
assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), |
|
2309 |
"For-loops that iterate over IndexSet with stride 2 may be wrong"); |
|
2310 |
} |
|
2311 |
#endif |
|
2312 |
||
2313 |
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
|
2314 |
assert_lock_strong(&_freelistLock);
|
2315 |
FreeList total;
|
2316 |
gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
|
2317 |
FreeList::print_labels_on(gclog_or_tty, "size");
|
2318 |
size_t totalFree = 0;
|
2319 |
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
2320 |
const FreeList *fl = &_indexedFreeList[i];
|
2321 |
totalFree += fl->count() * fl->size();
|
2322 |
if (i % (40*IndexSetStride) == 0) {
|
2323 |
FreeList::print_labels_on(gclog_or_tty, "size");
|
2324 |
}
|
2325 |
fl->print_on(gclog_or_tty);
|
2326 |
total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
|
2327 |
total.set_surplus( total.surplus() + fl->surplus() );
|
2328 |
total.set_desired( total.desired() + fl->desired() );
|
2329 |
total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
|
2330 |
total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
|
2331 |
total.set_count( total.count() + fl->count() );
|
2332 |
total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
|
2333 |
total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
|
2334 |
total.set_splitBirths(total.splitBirths() + fl->splitBirths());
|
2335 |
total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
|
2336 |
}
|
2337 |
total.print_on(gclog_or_tty, "TOTAL");
|
2338 |
gclog_or_tty->print_cr("Total free in indexed lists "
|
2339 |
SIZE_FORMAT " words", totalFree);
|
2340 |
gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
|
2341 |
(double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
|
2342 |
(total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
|
2343 |
(double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
|
2344 |
_dictionary->printDictCensus();
2345 |
} |

// Return the next displaced header, incrementing the pointer and
// recycling spool area as necessary.
markOop PromotionInfo::nextDisplacedHeader() {
  assert(_spoolHead != NULL, "promotionInfo inconsistency");
  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
         "Empty spool space: no displaced header can be fetched");
  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
  // Spool forward
  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
    // forward to next block, recycling this block into spare spool buffer
    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
    _spoolHead->nextSpoolBlock = _spareSpool;
    _spareSpool = _spoolHead;
    _spoolHead = tmp;
    _firstIndex = 1;
    NOT_PRODUCT(
      if (_spoolHead == NULL) {  // all buffers fully consumed
        assert(_spoolTail == NULL && _nextIndex == 1,
               "spool buffers processing inconsistency");
      }
    )
  }
  return hdr;
}

void PromotionInfo::track(PromotedObject* trackOop) {
  track(trackOop, oop(trackOop)->klass());
}

void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
  // make a copy of header as it may need to be spooled
  markOop mark = oop(trackOop)->mark();
  trackOop->clearNext();
  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
    // save non-prototypical header, and mark oop
    saveDisplacedHeader(mark);
    trackOop->setDisplacedMark();
  } else {
    // we'd like to assert something like the following:
    // assert(mark == markOopDesc::prototype(), "consistency check");
    // ... but the above won't work because the age bits have not (yet) been
    // cleared. The remainder of the check would be identical to the
    // condition checked in must_be_preserved() above, so we don't really
    // have anything useful to check here!
  }
  if (_promoTail != NULL) {
    assert(_promoHead != NULL, "List consistency");
    _promoTail->setNext(trackOop);
    _promoTail = trackOop;
  } else {
    assert(_promoHead == NULL, "List consistency");
    _promoHead = _promoTail = trackOop;
  }
  // Mark as newly promoted, so we can skip over such objects
  // when scanning dirty cards
  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
  trackOop->setPromotedMark();
}

// Save the given displaced header, incrementing the pointer and
// obtaining more spool area as necessary.
void PromotionInfo::saveDisplacedHeader(markOop hdr) {
  assert(_spoolHead != NULL && _spoolTail != NULL,
         "promotionInfo inconsistency");
  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
  _spoolTail->displacedHdr[_nextIndex] = hdr;
  // Spool forward
  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
    // get a new spooling block
    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
    _splice_point = _spoolTail;                   // save for splicing
    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
    // ... but will attempt filling before next promotion attempt
    _nextIndex = 1;
  }
}

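// Taken together, nextDisplacedHeader() and saveDisplacedHeader() make the
// spool area behave as a FIFO of displaced headers: entries are appended
// at <_spoolTail, _nextIndex> and consumed from <_spoolHead, _firstIndex>
// (indices are 1-based; slot 0 of displacedHdr is the block's
// self-pointer, as verify() below checks). Drained head blocks are parked
// on _spareSpool for reuse rather than returned to the heap.
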
// Ensure that spooling space exists. Return false if spooling space
// could not be obtained.
bool PromotionInfo::ensure_spooling_space_work() {
  assert(!has_spooling_space(), "Only call when there is no spooling space");
  // Try and obtain more spooling space
  SpoolBlock* newSpool = getSpoolBlock();
  assert(newSpool == NULL ||
         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
         "getSpoolBlock() sanity check");
  if (newSpool == NULL) {
    return false;
  }
  _nextIndex = 1;
  if (_spoolTail == NULL) {
    _spoolTail = newSpool;
    if (_spoolHead == NULL) {
      _spoolHead = newSpool;
      _firstIndex = 1;
    } else {
      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
             "Splice point invariant");
      // Extra check that _splice_point is connected to list
#ifdef ASSERT
      {
        SpoolBlock* blk = _spoolHead;
        for (; blk->nextSpoolBlock != NULL;
             blk = blk->nextSpoolBlock);
        assert(blk != NULL && blk == _splice_point,
               "Splice point incorrect");
      }
#endif // ASSERT
      _splice_point->nextSpoolBlock = newSpool;
    }
  } else {
    assert(_spoolHead != NULL, "spool list consistency");
    _spoolTail->nextSpoolBlock = newSpool;
    _spoolTail = newSpool;
  }
  return true;
}

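// Call-site sketch (hypothetical shape, for illustration only): a promoter
// about to displace a header would first guarantee spool space, failing
// the promotion if none can be obtained, e.g.:
//
//   if (!has_spooling_space() && !ensure_spooling_space_work()) {
//     return NULL;  // cannot promote: nowhere to save the displaced header
//   }
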
// Get a free spool buffer from the free pool, getting a new block
// from the heap if necessary.
SpoolBlock* PromotionInfo::getSpoolBlock() {
  SpoolBlock* res;
  if ((res = _spareSpool) != NULL) {
    _spareSpool = _spareSpool->nextSpoolBlock;
    res->nextSpoolBlock = NULL;
  } else {  // spare spool exhausted, get some from heap
    res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
    if (res != NULL) {
      res->init();
    }
  }
  assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
  return res;
}

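// Design note: getSpoolBlock() prefers recycling blocks parked on
// _spareSpool by nextDisplacedHeader() and only falls back to carving a
// fresh block out of the space's scratch area via allocateScratch(),
// which can fail under memory pressure -- hence the NULL-tolerant
// postcondition above and the "might fail" caveat at its callers.
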
void PromotionInfo::startTrackingPromotions() {
  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
         "spooling inconsistency?");
  _firstIndex = _nextIndex = 1;
  _tracking = true;
}

void PromotionInfo::stopTrackingPromotions() {
  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
         "spooling inconsistency?");
  _firstIndex = _nextIndex = 1;
  _tracking = false;
}

// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
// points to the next slot available for filling.
// The set of slots holding displaced headers are then all those in the
// right-open interval denoted by:
//
//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
//
// When _spoolTail is NULL, then the set of slots with displaced headers
// is all those starting at the slot <_spoolHead, _firstIndex> and
// going up to the last slot of the last block in the linked list.
// In this latter case, _splice_point points to the tail block of
// this linked list of blocks holding displaced headers.
void PromotionInfo::verify() const {
  // Verify the following:
  // 1. the number of displaced headers matches the number of promoted
  //    objects that have displaced headers
  // 2. each promoted object lies in this space
  debug_only(
    PromotedObject* junk = NULL;
    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
           "Offset of PromotedObject::_next is expected to align with "
           "the OopDesc::_mark within OopDesc");
  )
  // FIXME: guarantee????
  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
            _splice_point != NULL, "list consistency");
  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
  // count the number of objects with displaced headers
  size_t numObjsWithDisplacedHdrs = 0;
  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
    // the last promoted object may fail the mark() != NULL test of is_oop().
    guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
    if (curObj->hasDisplacedMark()) {
      numObjsWithDisplacedHdrs++;
    }
  }
  // Count the number of displaced headers
  size_t numDisplacedHdrs = 0;
  for (SpoolBlock* curSpool = _spoolHead;
       curSpool != _spoolTail && curSpool != NULL;
       curSpool = curSpool->nextSpoolBlock) {
    // the first entry is just a self-pointer; indices 1 through
    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
              "first entry of displacedHdr should be self-referential");
    numDisplacedHdrs += curSpool->bufferSize - 1;
  }
  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
            "internal consistency");
  guarantee(_spoolTail != NULL || _nextIndex == 1,
            "Inconsistency between _spoolTail and _nextIndex");
  // We overcounted (_firstIndex-1) worth of slots in block
  // _spoolHead and we undercounted (_nextIndex-1) worth of
  // slots in block _spoolTail. We make an appropriate
  // adjustment by subtracting the first and adding the
  // second:  - (_firstIndex - 1) + (_nextIndex - 1)
  numDisplacedHdrs += (_nextIndex - _firstIndex);
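  // A worked instance of this adjustment (numbers invented): two full
  // blocks of bufferSize 8 visited by the loop contribute 2 * (8 - 1)
  // == 14 slots. With _firstIndex == 3 (two head slots already consumed)
  // and _nextIndex == 5 (four tail slots filled, none counted yet), the
  // net correction is (5 - 3) == 2, giving 16 displaced headers in all.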
  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
}


CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  _blocks_to_claim = CMSParPromoteBlocksToClaim;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
  }
}

HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  word_sz = _cfls->adjustObjectSize(word_sz);
  if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    bool filled = false; //TRAP
    if (fl->count() == 0) {
      filled = true; //TRAP (assign, rather than shadow the flag above)
      // Attempt to refill this local free list.
      _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}

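// Usage sketch (hypothetical caller, for illustration only): a parallel
// promotion path would size its request in heap words and fall back to
// the shared slow path when the LAB cannot satisfy it, e.g.:
//
//   HeapWord* obj = lab.alloc(obj_size_in_words);
//   if (obj == NULL) {
//     obj = promote_slow(obj_size_in_words);  // hypothetical fallback
//   }
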
void CFLS_LAB::retire() {
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    if (_indexedFreeList[i].count() > 0) {
      MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
                      Mutex::_no_safepoint_check_flag);
      _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
      // Reset this list.
      _indexedFreeList[i] = FreeList();
      _indexedFreeList[i].set_size(i);
    }
  }
}

void
CompactibleFreeListSpace::
par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set (starting with
  // word_sz itself), then try getting a big chunk and splitting it.
  int k = 1;
  size_t cur_sz = k * word_sz;
  bool found = false;
  while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
    FreeList* gfl = &_indexedFreeList[cur_sz];
    FreeList fl_for_cur_sz;  // Empty.
    fl_for_cur_sz.set_size(cur_sz);
    {
      MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                      Mutex::_no_safepoint_check_flag);
      if (gfl->count() != 0) {
        size_t nn = MAX2(n/k, (size_t)1);
        gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
        found = true;
      }
    }
    // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
    if (found) {
      if (k == 1) {
        fl->prepend(&fl_for_cur_sz);
      } else {
        // Divide each block on fl_for_cur_sz up k ways.
        FreeChunk* fc;
        while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
          // Must do this in reverse order, so that anybody attempting to
          // access the main chunk sees it as a single free block until we
          // change it.
          size_t fc_size = fc->size();
          for (int i = k-1; i >= 0; i--) {
            FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
            ffc->setSize(word_sz);
            ffc->linkNext(NULL);
            ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
            // Above must occur before BOT is updated below.
            // splitting from the right, fc_size == (k - i + 1) * wordsize
            _bt.mark_block((HeapWord*)ffc, word_sz);
            fc_size -= word_sz;
            _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
            _bt.verify_single_block((HeapWord*)fc, fc_size);
            _bt.verify_single_block((HeapWord*)ffc, ffc->size());
            // Push this on "fl".
            fl->returnChunkAtHead(ffc);
          }
          // TRAP
          assert(fl->tail()->next() == NULL, "List invariant.");
        }
      }
      return;
    }
    k++; cur_sz = k * word_sz;
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                       _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a contending allocator
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkNext(NULL);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      // Above must occur before BOT is updated below.
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
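  // Worked example of the remainder arithmetic above (sizes invented):
  // asked for n == 16 chunks of word_sz == 8 from a 200-word block,
  // nn == 200 / 8 == 25, so n stays 16 and rem == 200 - 128 == 72 words.
  // Had the block been 130 words, rem would be 2; with MinChunkSize > 2
  // one fewer chunk is taken (n == 15) and rem grows to 10 words.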
  //
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }

  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
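  // To see why the order matters (illustrative): if fc spans 4 * word_sz
  // and chunks were carved left to right, a concurrent reader walking the
  // blocks could see fc already shrunk while its right portion is not yet
  // formatted as free chunks. Carving right to left keeps the
  // not-yet-split prefix [fc, fc + fc_size) a single valid free block at
  // every intermediate step.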
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkNext(NULL);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    // Above must occur before BOT is updated below.
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  fc->setSize(word_sz);
  fc->linkNext(NULL);
  fc->linkPrev(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  {
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(new_births);
    ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    _indexedFreeList[word_sz].set_surplus(new_surplus);
  }
  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((used_region().start() + (n_tasks - 1)*task_size <
          used_region().end()) &&
         (used_region().start() + n_tasks*task_size >=
          used_region().end()), "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}

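// For illustration (values invented): a used region of 1,000,000 words
// with rescan_task_size() == 65,536 words yields n_tasks ==
// (1000000 + 65535) / 65536 == 16 -- a ceiling division, so the final
// partial stripe still gets a task of its own, as the assert checks.
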
// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
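
// For illustration (addresses invented): with CardTableModRefBS::card_size
// == 512 bytes, a low of 0x7f0000001268 aligns down to 0x7f0000001200, so
// the clipped span starts exactly on a card boundary and
// block_offset_careful() can safely be used at its edges.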