author | jmasa |
Thu, 29 Mar 2012 19:46:24 -0700 | |
changeset 12507 | 6182ca66bc7b |
parent 12379 | 2cf45b79ce3a |
child 12509 | 6228e2085074 |
permissions | -rw-r--r-- |
1 | 1 |
/* |
12379 | 2 |
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
1 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
5547
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5434
diff
changeset
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5434
diff
changeset
|
20 |
* or visit www.oracle.com if you need additional information or have any |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5434
diff
changeset
|
21 |
* questions. |
1 | 22 |
* |
23 |
*/ |
|
24 |
||
7397 | 25 |
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |
26 |
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |
|
27 |
||
28 |
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp" |
|
12507 | 29 |
#include "memory/binaryTreeDictionary.hpp" |
7397 | 30 |
#include "memory/blockOffsetTable.inline.hpp" |
12507 | 31 |
#include "memory/freeList.hpp" |
7397 | 32 |
#include "memory/space.hpp" |
33 |
||
1 | 34 |
// Classes in support of keeping track of promotions into a non-Contiguous |
35 |
// space, in this case a CompactibleFreeListSpace. |
|
36 |
||
37 |
// Forward declarations |
|
38 |
class CompactibleFreeListSpace; |
|
39 |
class BlkClosure; |
|
40 |
class BlkClosureCareful; |
|
41 |
class UpwardsObjectClosure; |
|
42 |
class ObjectClosureCareful; |
|
43 |
class Klass; |
|
44 |
||
45 |
class LinearAllocBlock VALUE_OBJ_CLASS_SPEC { |
|
46 |
public: |
|
47 |
LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), |
|
48 |
_allocation_size_limit(0) {} |
|
49 |
void set(HeapWord* ptr, size_t word_size, size_t refill_size, |
|
50 |
size_t allocation_size_limit) { |
|
51 |
_ptr = ptr; |
|
52 |
_word_size = word_size; |
|
53 |
_refillSize = refill_size; |
|
54 |
_allocation_size_limit = allocation_size_limit; |
|
55 |
} |
|
56 |
HeapWord* _ptr; |
|
57 |
size_t _word_size; |
|
58 |
size_t _refillSize; |
|
59 |
size_t _allocation_size_limit; // largest size that will be allocated |
|
6258
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
60 |
|
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
61 |
void print_on(outputStream* st) const; |
1 | 62 |
}; |
63 |
||
64 |
// Concrete subclass of CompactibleSpace that implements |
|
65 |
// a free list space, such as used in the concurrent mark sweep |
|
66 |
// generation. |
|
67 |
||
68 |
class CompactibleFreeListSpace: public CompactibleSpace { |
|
69 |
friend class VMStructs; |
|
70 |
friend class ConcurrentMarkSweepGeneration; |
|
71 |
friend class ASConcurrentMarkSweepGeneration; |
|
72 |
friend class CMSCollector; |
|
73 |
friend class CMSPermGenGen; |
|
74 |
// Local alloc buffer for promotion into this space. |
|
75 |
friend class CFLS_LAB; |
|
76 |
||
77 |
// "Size" of chunks of work (executed during parallel remark phases |
|
78 |
// of CMS collection); this probably belongs in CMSCollector, although |
|
79 |
// it's cached here because it's used in |
|
80 |
// initialize_sequential_subtasks_for_rescan() which modifies |
|
81 |
// par_seq_tasks which also lives in Space. XXX |
|
82 |
const size_t _rescan_task_size; |
|
83 |
const size_t _marking_task_size; |
|
84 |
||
85 |
// Yet another sequential tasks done structure. This supports |
|
86 |
// CMS GC, where we have threads dynamically |
|
87 |
// claiming sub-tasks from a larger parallel task. |
|
88 |
SequentialSubTasksDone _conc_par_seq_tasks; |
|
89 |
||
90 |
BlockOffsetArrayNonContigSpace _bt; |
|
91 |
||
92 |
CMSCollector* _collector; |
|
93 |
ConcurrentMarkSweepGeneration* _gen; |
|
94 |
||
95 |
// Data structures for free blocks (used during allocation/sweeping) |
|
96 |
||
97 |
// Allocation is done linearly from two different blocks depending on |
|
98 |
// whether the request is small or large, in an effort to reduce |
|
99 |
// fragmentation. We assume that any locking for allocation is done |
|
100 |
// by the containing generation. Thus, none of the methods in this |
|
101 |
// space are re-entrant. |
|
102 |
enum SomeConstants { |
|
103 |
SmallForLinearAlloc = 16, // size < this then use _sLAB |
|
104 |
SmallForDictionary = 257, // size < this then use _indexedFreeList |
|
5694
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
105 |
IndexSetSize = SmallForDictionary // keep this odd-sized |
1 | 106 |
}; |
10992
b998c6b89fa2
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
10771
diff
changeset
|
107 |
static size_t IndexSetStart; |
b998c6b89fa2
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
10771
diff
changeset
|
108 |
static size_t IndexSetStride; |
1 | 109 |
|
110 |
private: |
|
111 |
enum FitStrategyOptions { |
|
112 |
FreeBlockStrategyNone = 0, |
|
113 |
FreeBlockBestFitFirst |
|
114 |
}; |
|
115 |
||
116 |
PromotionInfo _promoInfo; |
|
117 |
||
118 |
// helps to impose a global total order on freelistLock ranks; |
|
119 |
// assumes that CFLSpace's are allocated in global total order |
|
120 |
static int _lockRank; |
|
121 |
||
122 |
// a lock protecting the free lists and free blocks; |
|
123 |
// mutable because of ubiquity of locking even for otherwise const methods |
|
124 |
mutable Mutex _freelistLock; |
|
125 |
// locking verifier convenience function |
|
126 |
void assert_locked() const PRODUCT_RETURN; |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
127 |
void assert_locked(const Mutex* lock) const PRODUCT_RETURN; |
1 | 128 |
|
129 |
// Linear allocation blocks |
|
130 |
LinearAllocBlock _smallLinearAllocBlock; |
|
131 |
||
12507 | 132 |
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice; |
133 |
FreeBlockDictionary<FreeChunk>* _dictionary; // ptr to dictionary for large size blocks |
|
1 | 134 |
|
12507 | 135 |
FreeList<FreeChunk> _indexedFreeList[IndexSetSize]; |
1 | 136 |
// indexed array for small size blocks |
137 |
// allocation strategy |
|
138 |
bool _fitStrategy; // Use best fit strategy. |
|
139 |
bool _adaptive_freelists; // Use adaptive freelists |
|
140 |
||
141 |
// This is an address close to the largest free chunk in the heap. |
|
142 |
// It is currently assumed to be at the end of the heap. Free |
|
143 |
// chunks with addresses greater than nearLargestChunk are coalesced |
|
144 |
// in an effort to maintain a large chunk at the end of the heap. |
|
145 |
HeapWord* _nearLargestChunk; |
|
146 |
||
147 |
// Used to keep track of limit of sweep for the space |
|
148 |
HeapWord* _sweep_limit; |
|
149 |
||
150 |
// Support for compacting cms |
|
151 |
HeapWord* cross_threshold(HeapWord* start, HeapWord* end); |
|
152 |
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); |
|
153 |
||
154 |
// Initialization helpers. |
|
155 |
void initializeIndexedFreeListArray(); |
|
156 |
||
157 |
// Extra stuff to manage promotion parallelism. |
|
158 |
||
159 |
// a lock protecting the dictionary during par promotion allocation. |
|
160 |
mutable Mutex _parDictionaryAllocLock; |
|
161 |
Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } |
|
162 |
||
163 |
// Locks protecting the exact lists during par promotion allocation. |
|
164 |
Mutex* _indexedFreeListParLocks[IndexSetSize]; |
|
165 |
||
166 |
// Attempt to obtain up to "n" blocks of the size "word_sz" (which is |
|
167 |
// required to be smaller than "IndexSetSize".) If successful, |
|
168 |
// adds them to "fl", which is required to be an empty free list. |
|
169 |
// If the count of "fl" is negative, its absolute value indicates a |
|
170 |
// number of free chunks that had been previously "borrowed" from global |
|
171 |
// list of size "word_sz", and must now be decremented. |
|
12507 | 172 |
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl); |
1 | 173 |
|
174 |
// Allocation helper functions |
|
175 |
// Allocate using a strategy that takes from the indexed free lists |
|
176 |
// first. This allocation strategy assumes a companion sweeping |
|
177 |
// strategy that attempts to keep the needed number of chunks in each |
|
178 |
// indexed free lists. |
|
179 |
HeapWord* allocate_adaptive_freelists(size_t size); |
|
180 |
// Allocate from the linear allocation buffers first. This allocation |
|
181 |
// strategy assumes maximal coalescing can maintain chunks large enough |
|
182 |
// to be used as linear allocation buffers. |
|
183 |
HeapWord* allocate_non_adaptive_freelists(size_t size); |
|
184 |
||
185 |
// Gets a chunk from the linear allocation block (LinAB). If there |
|
186 |
// is not enough space in the LinAB, refills it. |
|
187 |
HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size); |
|
188 |
HeapWord* getChunkFromSmallLinearAllocBlock(size_t size); |
|
189 |
// Get a chunk from the space remaining in the linear allocation block. Do |
|
190 |
// not attempt to refill if the space is not available, return NULL. Do the |
|
191 |
// repairs on the linear allocation block as appropriate. |
|
192 |
HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size); |
|
193 |
inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size); |
|
194 |
||
195 |
// Helper function for getChunkFromIndexedFreeList. |
|
196 |
// Replenish the indexed free list for this "size". Do not take from an |
|
197 |
// underpopulated size. |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
198 |
FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true); |
1 | 199 |
|
200 |
// Get a chunk from the indexed free list. If the indexed free list |
|
201 |
// does not have a free chunk, try to replenish the indexed free list |
|
202 |
// then get the free chunk from the replenished indexed free list. |
|
203 |
inline FreeChunk* getChunkFromIndexedFreeList(size_t size); |
|
204 |
||
205 |
// The returned chunk may be larger than requested (or null). |
|
206 |
FreeChunk* getChunkFromDictionary(size_t size); |
|
207 |
// The returned chunk is the exact size requested (or null). |
|
208 |
FreeChunk* getChunkFromDictionaryExact(size_t size); |
|
209 |
||
210 |
// Find a chunk in the indexed free list that is the best |
|
211 |
// fit for size "numWords". |
|
212 |
FreeChunk* bestFitSmall(size_t numWords); |
|
213 |
// For free list "fl" of chunks of size > numWords, |
|
214 |
// remove a chunk, split off a chunk of size numWords |
|
215 |
// and return it. The split off remainder is returned to |
|
216 |
// the free lists. The old name for getFromListGreater |
|
217 |
// was lookInListGreater. |
|
12507 | 218 |
FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords); |
1 | 219 |
// Get a chunk in the indexed free list or dictionary, |
220 |
// by considering a larger chunk and splitting it. |
|
221 |
FreeChunk* getChunkFromGreater(size_t numWords); |
|
222 |
// Verify that the given chunk is in the indexed free lists. |
|
223 |
bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const; |
|
224 |
// Remove the specified chunk from the indexed free lists. |
|
225 |
void removeChunkFromIndexedFreeList(FreeChunk* fc); |
|
226 |
// Remove the specified chunk from the dictionary. |
|
227 |
void removeChunkFromDictionary(FreeChunk* fc); |
|
228 |
// Split a free chunk into a smaller free chunk of size "new_size". |
|
229 |
// Return the smaller free chunk and return the remainder to the |
|
230 |
// free lists. |
|
231 |
FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size); |
|
232 |
// Add a chunk to the free lists. |
|
233 |
void addChunkToFreeLists(HeapWord* chunk, size_t size); |
|
234 |
// Add a chunk to the free lists, preferring to suffix it |
|
235 |
// to the last free chunk at end of space if possible, and |
|
236 |
// updating the block census stats as well as block offset table. |
|
237 |
// Take any locks as appropriate if we are multithreaded. |
|
238 |
void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size); |
|
239 |
// Add a free chunk to the indexed free lists. |
|
240 |
void returnChunkToFreeList(FreeChunk* chunk); |
|
241 |
// Add a free chunk to the dictionary. |
|
242 |
void returnChunkToDictionary(FreeChunk* chunk); |
|
243 |
||
244 |
// Functions for maintaining the linear allocation buffers (LinAB). |
|
245 |
// Repairing a linear allocation block refers to operations |
|
246 |
// performed on the remainder of a LinAB after an allocation |
|
247 |
// has been made from it. |
|
248 |
void repairLinearAllocationBlocks(); |
|
249 |
void repairLinearAllocBlock(LinearAllocBlock* blk); |
|
250 |
void refillLinearAllocBlock(LinearAllocBlock* blk); |
|
251 |
void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk); |
|
252 |
void refillLinearAllocBlocksIfNeeded(); |
|
253 |
||
254 |
void verify_objects_initialized() const; |
|
255 |
||
256 |
// Statistics reporting helper functions |
|
257 |
void reportFreeListStatistics() const; |
|
258 |
void reportIndexedFreeListStatistics() const; |
|
259 |
size_t maxChunkSizeInIndexedFreeLists() const; |
|
260 |
size_t numFreeBlocksInIndexedFreeLists() const; |
|
261 |
// Accessor |
|
262 |
HeapWord* unallocated_block() const { |
|
6258
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
263 |
if (BlockOffsetArrayUseUnallocatedBlock) { |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
264 |
HeapWord* ub = _bt.unallocated_block(); |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
265 |
assert(ub >= bottom() && |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
266 |
ub <= end(), "space invariant"); |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
267 |
return ub; |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
268 |
} else { |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
269 |
return end(); |
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
270 |
} |
1 | 271 |
} |
272 |
void freed(HeapWord* start, size_t size) { |
|
273 |
_bt.freed(start, size); |
|
274 |
} |
|
275 |
||
276 |
protected: |
|
277 |
// reset the indexed free list to its initial empty condition. |
|
278 |
void resetIndexedFreeListArray(); |
|
279 |
// reset to an initial state with a single free block described |
|
280 |
// by the MemRegion parameter. |
|
281 |
void reset(MemRegion mr); |
|
282 |
// Return the total number of words in the indexed free lists. |
|
283 |
size_t totalSizeInIndexedFreeLists() const; |
|
284 |
||
285 |
public: |
|
286 |
// Constructor... |
|
287 |
CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, |
|
288 |
bool use_adaptive_freelists, |
|
12507 | 289 |
FreeBlockDictionary<FreeChunk>::DictionaryChoice); |
1 | 290 |
// accessors |
291 |
bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } |
|
12507 | 292 |
FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; } |
1 | 293 |
HeapWord* nearLargestChunk() const { return _nearLargestChunk; } |
294 |
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } |
|
295 |
||
5694
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
296 |
// Set CMS global values |
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
297 |
static void set_cms_values(); |
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
298 |
|
1 | 299 |
// Return the free chunk at the end of the space. If no such |
300 |
// chunk exists, return NULL. |
|
301 |
FreeChunk* find_chunk_at_end(); |
|
302 |
||
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
303 |
bool adaptive_freelists() const { return _adaptive_freelists; } |
1 | 304 |
|
305 |
void set_collector(CMSCollector* collector) { _collector = collector; } |
|
306 |
||
307 |
// Support for parallelization of rescan and marking |
|
308 |
const size_t rescan_task_size() const { return _rescan_task_size; } |
|
309 |
const size_t marking_task_size() const { return _marking_task_size; } |
|
310 |
SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } |
|
311 |
void initialize_sequential_subtasks_for_rescan(int n_threads); |
|
312 |
void initialize_sequential_subtasks_for_marking(int n_threads, |
|
313 |
HeapWord* low = NULL); |
|
314 |
||
315 |
// Space enquiries |
|
316 |
size_t used() const; |
|
317 |
size_t free() const; |
|
318 |
size_t max_alloc_in_words() const; |
|
319 |
// XXX: should have a less conservative used_region() than that of |
|
320 |
// Space; we could consider keeping track of highest allocated |
|
321 |
// address and correcting that at each sweep, as the sweeper |
|
322 |
// goes through the entire allocated part of the generation. We |
|
323 |
// could also use that information to keep the sweeper from |
|
324 |
// sweeping more than is necessary. The allocator and sweeper will |
|
325 |
// of course need to synchronize on this, since the sweeper will |
|
326 |
// try to bump down the address and the allocator will try to bump it up. |
|
327 |
// For now, however, we'll just use the default used_region() |
|
328 |
// which overestimates the region by returning the entire |
|
329 |
// committed region (this is safe, but inefficient). |
|
330 |
||
331 |
// Returns a subregion of the space containing all the objects in |
|
332 |
// the space. |
|
333 |
MemRegion used_region() const { |
|
334 |
return MemRegion(bottom(), |
|
335 |
BlockOffsetArrayUseUnallocatedBlock ? |
|
336 |
unallocated_block() : end()); |
|
337 |
} |
|
338 |
||
339 |
bool is_in(const void* p) const { |
|
340 |
return used_region().contains(p); |
|
341 |
} |
|
342 |
||
343 |
virtual bool is_free_block(const HeapWord* p) const; |
|
344 |
||
345 |
// Resizing support |
|
346 |
void set_end(HeapWord* value); // override |
|
347 |
||
348 |
// mutual exclusion support |
|
349 |
Mutex* freelistLock() const { return &_freelistLock; } |
|
350 |
||
351 |
// Iteration support |
|
352 |
void oop_iterate(MemRegion mr, OopClosure* cl); |
|
353 |
void oop_iterate(OopClosure* cl); |
|
354 |
||
355 |
void object_iterate(ObjectClosure* blk); |
|
1893
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
356 |
// Apply the closure to each object in the space whose references |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
357 |
// point to objects in the heap. The usage of CompactibleFreeListSpace |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
358 |
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
359 |
// objects in the space with references to objects that are no longer |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
360 |
// valid. For example, an object may reference another object |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
361 |
// that has already been swept up (collected). This method uses |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
362 |
// obj_is_alive() to determine whether it is safe to iterate over |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
363 |
// an object. |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
364 |
void safe_object_iterate(ObjectClosure* blk); |
1 | 365 |
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
366 |
||
367 |
// Requires that "mr" be entirely within the space. |
|
368 |
// Apply "cl->do_object" to all objects that intersect with "mr". |
|
369 |
// If the iteration encounters an unparseable portion of the region, |
|
370 |
// terminate the iteration and return the address of the start of the |
|
371 |
// subregion that isn't done. Return of "NULL" indicates that the |
|
372 |
// iteration completed. |
|
373 |
virtual HeapWord* |
|
374 |
object_iterate_careful_m(MemRegion mr, |
|
375 |
ObjectClosureCareful* cl); |
|
376 |
virtual HeapWord* |
|
377 |
object_iterate_careful(ObjectClosureCareful* cl); |
|
378 |
||
379 |
// Override: provides a DCTO_CL specific to this kind of space. |
|
380 |
DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl, |
|
381 |
CardTableModRefBS::PrecisionStyle precision, |
|
382 |
HeapWord* boundary); |
|
383 |
||
384 |
void blk_iterate(BlkClosure* cl); |
|
385 |
void blk_iterate_careful(BlkClosureCareful* cl); |
|
1374
4c24294029a9
6711316: Open source the Garbage-First garbage collector
ysr
parents:
360
diff
changeset
|
386 |
HeapWord* block_start_const(const void* p) const; |
1 | 387 |
HeapWord* block_start_careful(const void* p) const; |
388 |
size_t block_size(const HeapWord* p) const; |
|
389 |
size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const; |
|
390 |
bool block_is_obj(const HeapWord* p) const; |
|
391 |
bool obj_is_alive(const HeapWord* p) const; |
|
392 |
size_t block_size_nopar(const HeapWord* p) const; |
|
393 |
bool block_is_obj_nopar(const HeapWord* p) const; |
|
394 |
||
395 |
// iteration support for promotion |
|
396 |
void save_marks(); |
|
397 |
bool no_allocs_since_save_marks(); |
|
398 |
void object_iterate_since_last_GC(ObjectClosure* cl); |
|
399 |
||
400 |
// iteration support for sweeping |
|
401 |
void save_sweep_limit() { |
|
402 |
_sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? |
|
403 |
unallocated_block() : end(); |
|
9969
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
404 |
if (CMSTraceSweeper) { |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
405 |
gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
406 |
" for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<", |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
407 |
_sweep_limit, bottom(), end()); |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
408 |
} |
1 | 409 |
} |
410 |
NOT_PRODUCT( |
|
411 |
void clear_sweep_limit() { _sweep_limit = NULL; } |
|
412 |
) |
|
413 |
HeapWord* sweep_limit() { return _sweep_limit; } |
|
414 |
||
415 |
// Apply "blk->do_oop" to the addresses of all reference fields in objects |
|
416 |
// promoted into this generation since the most recent save_marks() call. |
|
417 |
// Fields in objects allocated by applications of the closure |
|
418 |
// *are* included in the iteration. Thus, when the iteration completes |
|
419 |
// there should be no further such objects remaining. |
|
420 |
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
|
421 |
void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); |
|
422 |
ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL) |
|
423 |
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL |
|
424 |
||
425 |
// Allocation support |
|
426 |
HeapWord* allocate(size_t size); |
|
427 |
HeapWord* par_allocate(size_t size); |
|
428 |
||
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
185
diff
changeset
|
429 |
oop promote(oop obj, size_t obj_size); |
1 | 430 |
void gc_prologue(); |
431 |
void gc_epilogue(); |
|
432 |
||
433 |
// This call is used by a containing CMS generation / collector |
|
434 |
// to inform the CFLS space that a sweep has been completed |
|
435 |
// and that the space can do any related house-keeping functions. |
|
436 |
void sweep_completed(); |
|
437 |
||
438 |
// For an object in this space, the mark-word's two |
|
439 |
// LSB's having the value [11] indicates that it has been |
|
440 |
// promoted since the most recent call to save_marks() on |
|
441 |
// this generation and has not subsequently been iterated |
|
442 |
// over (using oop_since_save_marks_iterate() above). |
|
5434 | 443 |
// This property holds only for single-threaded collections, |
444 |
// and is typically used for Cheney scans; for MT scavenges, |
|
445 |
// the property holds for all objects promoted during that |
|
446 |
// scavenge for the duration of the scavenge and is used |
|
447 |
// by card-scanning to avoid scanning objects (being) promoted |
|
448 |
// during that scavenge. |
|
1 | 449 |
bool obj_allocated_since_save_marks(const oop obj) const { |
450 |
assert(is_in_reserved(obj), "Wrong space?"); |
|
451 |
return ((PromotedObject*)obj)->hasPromotedMark(); |
|
452 |
} |
|
453 |
||
454 |
// A worst-case estimate of the space required (in HeapWords) to expand the |
|
455 |
// heap when promoting an obj of size obj_size. |
|
456 |
size_t expansionSpaceRequired(size_t obj_size) const; |
|
457 |
||
458 |
FreeChunk* allocateScratch(size_t size); |
|
459 |
||
460 |
// returns true if either the small or large linear allocation buffer is empty. |
|
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
461 |
bool linearAllocationWouldFail() const; |
1 | 462 |
|
463 |
// Adjust the chunk for the minimum size. This version is called in |
|
464 |
// most cases in CompactibleFreeListSpace methods. |
|
465 |
inline static size_t adjustObjectSize(size_t size) { |
|
466 |
return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize)); |
|
467 |
} |
|
468 |
// This is a virtual version of adjustObjectSize() that is called |
|
469 |
// only occasionally when the compaction space changes and the type |
|
470 |
// of the new compaction space is only known to be CompactibleSpace. |
|
471 |
size_t adjust_object_size_v(size_t size) const { |
|
472 |
return adjustObjectSize(size); |
|
473 |
} |
|
474 |
// Minimum size of a free block. |
|
475 |
virtual size_t minimum_free_block_size() const { return MinChunkSize; } |
|
476 |
void removeFreeChunkFromFreeLists(FreeChunk* chunk); |
|
477 |
void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, |
|
478 |
bool coalesced); |
|
479 |
||
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
480 |
// Support for decisions regarding concurrent collection policy |
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
481 |
bool should_concurrent_collect() const; |
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
482 |
|
1 | 483 |
// Support for compaction |
484 |
void prepare_for_compaction(CompactPoint* cp); |
|
485 |
void adjust_pointers(); |
|
486 |
void compact(); |
|
487 |
// reset the space to reflect the fact that a compaction of the |
|
488 |
// space has been done. |
|
489 |
virtual void reset_after_compaction(); |
|
490 |
||
491 |
// Debugging support |
|
492 |
void print() const; |
|
6258
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
493 |
void print_on(outputStream* st) const; |
1 | 494 |
void prepare_for_verify(); |
12379 | 495 |
void verify() const; |
1 | 496 |
void verifyFreeLists() const PRODUCT_RETURN; |
497 |
void verifyIndexedFreeLists() const; |
|
498 |
void verifyIndexedFreeList(size_t size) const; |
|
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
499 |
// Verify that the given chunk is in the free lists: |
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
500 |
// i.e. either the binary tree dictionary, the indexed free lists |
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
501 |
// or the linear allocation block. |
1 | 502 |
bool verifyChunkInFreeLists(FreeChunk* fc) const; |
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
503 |
// Verify that the given chunk is the linear allocation block |
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
504 |
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; |
1 | 505 |
// Do some basic checks on the free lists. |
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
506 |
void check_free_list_consistency() const PRODUCT_RETURN; |
1 | 507 |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
508 |
// Printing support |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
509 |
void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st); |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
510 |
void print_indexed_free_lists(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
511 |
void print_dictionary_free_lists(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
512 |
void print_promo_info_blocks(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
513 |
|
1 | 514 |
NOT_PRODUCT ( |
515 |
void initializeIndexedFreeListArrayReturnedBytes(); |
|
516 |
size_t sumIndexedFreeListArrayReturnedBytes(); |
|
517 |
// Return the total number of chunks in the indexed free lists. |
|
518 |
size_t totalCountInIndexedFreeLists() const; |
|
519 |
// Return the total numberof chunks in the space. |
|
520 |
size_t totalCount(); |
|
521 |
) |
|
522 |
||
523 |
// The census consists of counts of the quantities such as |
|
524 |
// the current count of the free chunks, number of chunks |
|
525 |
// created as a result of the split of a larger chunk or |
|
526 |
// coalescing of smaller chucks, etc. The counts in the |
|
527 |
// census is used to make decisions on splitting and |
|
528 |
// coalescing of chunks during the sweep of garbage. |
|
529 |
||
530 |
// Print the statistics for the free lists. |
|
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
531 |
void printFLCensus(size_t sweep_count) const; |
1 | 532 |
|
533 |
// Statistics functions |
|
534 |
// Initialize census for lists before the sweep. |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
535 |
void beginSweepFLCensus(float inter_sweep_current, |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
536 |
float inter_sweep_estimate, |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
537 |
float intra_sweep_estimate); |
1 | 538 |
// Set the surplus for each of the free lists. |
539 |
void setFLSurplus(); |
|
540 |
// Set the hint for each of the free lists. |
|
541 |
void setFLHints(); |
|
542 |
// Clear the census for each of the free lists. |
|
543 |
void clearFLCensus(); |
|
544 |
// Perform functions for the census after the end of the sweep. |
|
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
545 |
void endSweepFLCensus(size_t sweep_count); |
1 | 546 |
// Return true if the count of free chunks is greater |
547 |
// than the desired number of free chunks. |
|
548 |
bool coalOverPopulated(size_t size); |
|
549 |
||
550 |
// Record (for each size): |
|
551 |
// |
|
552 |
// split-births = #chunks added due to splits in (prev-sweep-end, |
|
553 |
// this-sweep-start) |
|
554 |
// split-deaths = #chunks removed for splits in (prev-sweep-end, |
|
555 |
// this-sweep-start) |
|
556 |
// num-curr = #chunks at start of this sweep |
|
557 |
// num-prev = #chunks at end of previous sweep |
|
558 |
// |
|
559 |
// The above are quantities that are measured. Now define: |
|
560 |
// |
|
561 |
// num-desired := num-prev + split-births - split-deaths - num-curr |
|
562 |
// |
|
563 |
// Roughly, num-prev + split-births is the supply, |
|
564 |
// split-deaths is demand due to other sizes |
|
565 |
// and num-curr is what we have left. |
|
566 |
// |
|
567 |
// Thus, num-desired is roughly speaking the "legitimate demand" |
|
568 |
// for blocks of this size and what we are striving to reach at the |
|
569 |
// end of the current sweep. |
|
570 |
// |
|
571 |
// For a given list, let num-len be its current population. |
|
572 |
// Define, for a free list of a given size: |
|
573 |
// |
|
574 |
// coal-overpopulated := num-len >= num-desired * coal-surplus |
|
575 |
// (coal-surplus is set to 1.05, i.e. we allow a little slop when |
|
576 |
// coalescing -- we do not coalesce unless we think that the current |
|
577 |
// supply has exceeded the estimated demand by more than 5%). |
|
578 |
// |
|
579 |
// For the set of sizes in the binary tree, which is neither dense nor |
|
580 |
// closed, it may be the case that for a particular size we have never |
|
581 |
// had, or do not now have, or did not have at the previous sweep, |
|
582 |
// chunks of that size. We need to extend the definition of |
|
583 |
// coal-overpopulated to such sizes as well: |
|
584 |
// |
|
585 |
// For a chunk in/not in the binary tree, extend coal-overpopulated |
|
586 |
// defined above to include all sizes as follows: |
|
587 |
// |
|
588 |
// . a size that is non-existent is coal-overpopulated |
|
589 |
// . a size that has a num-desired <= 0 as defined above is |
|
590 |
// coal-overpopulated. |
|
591 |
// |
|
592 |
// Also define, for a chunk heap-offset C and mountain heap-offset M: |
|
593 |
// |
|
594 |
// close-to-mountain := C >= 0.99 * M |
|
595 |
// |
|
596 |
// Now, the coalescing strategy is: |
|
597 |
// |
|
598 |
// Coalesce left-hand chunk with right-hand chunk if and |
|
599 |
// only if: |
|
600 |
// |
|
601 |
// EITHER |
|
602 |
// . left-hand chunk is of a size that is coal-overpopulated |
|
603 |
// OR |
|
604 |
// . right-hand chunk is close-to-mountain |
|
605 |
void smallCoalBirth(size_t size); |
|
606 |
void smallCoalDeath(size_t size); |
|
607 |
void coalBirth(size_t size); |
|
608 |
void coalDeath(size_t size); |
|
609 |
void smallSplitBirth(size_t size); |
|
610 |
void smallSplitDeath(size_t size); |
|
611 |
void splitBirth(size_t size); |
|
612 |
void splitDeath(size_t size); |
|
613 |
void split(size_t from, size_t to1); |
|
614 |
||
615 |
double flsFrag() const; |
|
616 |
}; |
|
617 |
||
618 |
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
  // NOTE(review): the comment above appears orphaned -- verify which
  // member (if any) it was meant to document.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method: refill the local free list of the given
  // word size from the shared (global) pool.
  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);

public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};
654 |
||
655 |
// Size, in heap words, of one spool-block refill: a SpoolBlock header
// plus room for 256 displaced mark words (markOop), rounded up to a
// properly adjusted object size for this space.
size_t PromotionInfo::refillSize() const {
  const size_t kSpoolBlockCapacity = 256;   // mark words per spool block
  const size_t payload_bytes = sizeof(markOop) * kSpoolBlockCapacity;
  const size_t raw_words     = heap_word_size(sizeof(SpoolBlock) + payload_bytes);
  return CompactibleFreeListSpace::adjustObjectSize(raw_words);
}
|
7397 | 661 |
|
662 |
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |