author | katleman |
Thu, 16 Oct 2014 12:01:59 -0700 (2014-10-16) | |
changeset 27002 | 297d6a75d68a |
parent 26150 | 6186fcfeb5ae |
child 27624 | fe43edc5046d |
permissions | -rw-r--r-- |
1 | 1 |
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
24 |
||
7397 | 25 |
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |
26 |
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |
|
27 |
||
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
28 |
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp" |
7397 | 29 |
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp" |
12507 | 30 |
#include "memory/binaryTreeDictionary.hpp" |
7397 | 31 |
#include "memory/blockOffsetTable.inline.hpp" |
12507 | 32 |
#include "memory/freeList.hpp" |
7397 | 33 |
#include "memory/space.hpp" |
34 |
||
1 | 35 |
// Classes in support of keeping track of promotions into a non-Contiguous |
36 |
// space, in this case a CompactibleFreeListSpace. |
|
37 |
||
38 |
// Forward declarations |
|
39 |
class CompactibleFreeListSpace; |
|
40 |
class BlkClosure; |
|
41 |
class BlkClosureCareful; |
|
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
42 |
class FreeChunk; |
1 | 43 |
class UpwardsObjectClosure; |
44 |
class ObjectClosureCareful; |
|
45 |
class Klass; |
|
46 |
||
47 |
class LinearAllocBlock VALUE_OBJ_CLASS_SPEC { |
|
48 |
public: |
|
49 |
LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), |
|
50 |
_allocation_size_limit(0) {} |
|
51 |
void set(HeapWord* ptr, size_t word_size, size_t refill_size, |
|
52 |
size_t allocation_size_limit) { |
|
53 |
_ptr = ptr; |
|
54 |
_word_size = word_size; |
|
55 |
_refillSize = refill_size; |
|
56 |
_allocation_size_limit = allocation_size_limit; |
|
57 |
} |
|
58 |
HeapWord* _ptr; |
|
59 |
size_t _word_size; |
|
60 |
size_t _refillSize; |
|
22551 | 61 |
size_t _allocation_size_limit; // Largest size that will be allocated |
6258
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
62 |
|
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
63 |
void print_on(outputStream* st) const; |
1 | 64 |
}; |
65 |
||
66 |
// Concrete subclass of CompactibleSpace that implements |
|
67 |
// a free list space, such as used in the concurrent mark sweep |
|
68 |
// generation. |
|
69 |
||
70 |
class CompactibleFreeListSpace: public CompactibleSpace { |
|
71 |
friend class VMStructs; |
|
72 |
friend class ConcurrentMarkSweepGeneration; |
|
73 |
friend class CMSCollector; |
|
74 |
// Local alloc buffer for promotion into this space. |
|
75 |
friend class CFLS_LAB; |
|
76 |
||
77 |
// "Size" of chunks of work (executed during parallel remark phases |
|
78 |
// of CMS collection); this probably belongs in CMSCollector, although |
|
79 |
// it's cached here because it's used in |
|
80 |
// initialize_sequential_subtasks_for_rescan() which modifies |
|
81 |
// par_seq_tasks which also lives in Space. XXX |
|
82 |
const size_t _rescan_task_size; |
|
83 |
const size_t _marking_task_size; |
|
84 |
||
85 |
// Yet another sequential tasks done structure. This supports |
|
86 |
// CMS GC, where we have threads dynamically |
|
87 |
// claiming sub-tasks from a larger parallel task. |
|
88 |
SequentialSubTasksDone _conc_par_seq_tasks; |
|
89 |
||
90 |
BlockOffsetArrayNonContigSpace _bt; |
|
91 |
||
92 |
CMSCollector* _collector; |
|
93 |
ConcurrentMarkSweepGeneration* _gen; |
|
94 |
||
95 |
// Data structures for free blocks (used during allocation/sweeping) |
|
96 |
||
97 |
// Allocation is done linearly from two different blocks depending on |
|
98 |
// whether the request is small or large, in an effort to reduce |
|
99 |
// fragmentation. We assume that any locking for allocation is done |
|
100 |
// by the containing generation. Thus, none of the methods in this |
|
101 |
// space are re-entrant. |
|
102 |
enum SomeConstants { |
|
103 |
SmallForLinearAlloc = 16, // size < this then use _sLAB |
|
104 |
SmallForDictionary = 257, // size < this then use _indexedFreeList |
|
5694
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
105 |
IndexSetSize = SmallForDictionary // keep this odd-sized |
1 | 106 |
}; |
10992
b998c6b89fa2
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
10771
diff
changeset
|
107 |
static size_t IndexSetStart; |
b998c6b89fa2
7105163: CMS: some mentions of MinChunkSize should be IndexSetStart
ysr
parents:
10771
diff
changeset
|
108 |
static size_t IndexSetStride; |
1 | 109 |
|
110 |
private: |
|
111 |
enum FitStrategyOptions { |
|
112 |
FreeBlockStrategyNone = 0, |
|
113 |
FreeBlockBestFitFirst |
|
114 |
}; |
|
115 |
||
116 |
PromotionInfo _promoInfo; |
|
117 |
||
22551 | 118 |
// Helps to impose a global total order on freelistLock ranks; |
1 | 119 |
// assumes that CFLSpace's are allocated in global total order |
120 |
static int _lockRank; |
|
121 |
||
22551 | 122 |
// A lock protecting the free lists and free blocks; |
1 | 123 |
// mutable because of ubiquity of locking even for otherwise const methods |
124 |
mutable Mutex _freelistLock; |
|
22551 | 125 |
// Locking verifier convenience function |
1 | 126 |
void assert_locked() const PRODUCT_RETURN; |
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
127 |
void assert_locked(const Mutex* lock) const PRODUCT_RETURN; |
1 | 128 |
|
129 |
// Linear allocation blocks |
|
130 |
LinearAllocBlock _smallLinearAllocBlock; |
|
131 |
||
12507 | 132 |
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice; |
22551 | 133 |
AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks |
1 | 134 |
|
22551 | 135 |
// Indexed array for small size blocks |
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
136 |
AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize]; |
22551 | 137 |
|
138 |
// Allocation strategy |
|
139 |
bool _fitStrategy; // Use best fit strategy |
|
1 | 140 |
bool _adaptive_freelists; // Use adaptive freelists |
141 |
||
142 |
// This is an address close to the largest free chunk in the heap. |
|
143 |
// It is currently assumed to be at the end of the heap. Free |
|
144 |
// chunks with addresses greater than nearLargestChunk are coalesced |
|
145 |
// in an effort to maintain a large chunk at the end of the heap. |
|
146 |
HeapWord* _nearLargestChunk; |
|
147 |
||
148 |
// Used to keep track of limit of sweep for the space |
|
149 |
HeapWord* _sweep_limit; |
|
150 |
||
151 |
// Support for compacting cms |
|
152 |
HeapWord* cross_threshold(HeapWord* start, HeapWord* end); |
|
153 |
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); |
|
154 |
||
155 |
// Initialization helpers. |
|
156 |
void initializeIndexedFreeListArray(); |
|
157 |
||
158 |
// Extra stuff to manage promotion parallelism. |
|
159 |
||
22551 | 160 |
// A lock protecting the dictionary during par promotion allocation. |
1 | 161 |
mutable Mutex _parDictionaryAllocLock; |
162 |
Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } |
|
163 |
||
164 |
// Locks protecting the exact lists during par promotion allocation. |
|
165 |
Mutex* _indexedFreeListParLocks[IndexSetSize]; |
|
166 |
||
167 |
// Attempt to obtain up to "n" blocks of the size "word_sz" (which is |
|
168 |
// required to be smaller than "IndexSetSize".) If successful, |
|
169 |
// adds them to "fl", which is required to be an empty free list. |
|
170 |
// If the count of "fl" is negative, it's absolute value indicates a |
|
171 |
// number of free chunks that had been previously "borrowed" from global |
|
172 |
// list of size "word_sz", and must now be decremented. |
|
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
173 |
void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
1 | 174 |
|
26150
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
175 |
// Used by par_get_chunk_of_blocks() for the chunks from the |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
176 |
// indexed_free_lists. |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
177 |
bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
178 |
|
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
179 |
// Used by par_get_chunk_of_blocks_dictionary() to get a chunk |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
180 |
// evenly splittable into "n" "word_sz" chunks. Returns that |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
181 |
// evenly splittable chunk. May split a larger chunk to get the |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
182 |
// evenly splittable chunk. |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
183 |
FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n); |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
184 |
|
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
185 |
// Used by par_get_chunk_of_blocks() for the chunks from the |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
186 |
// dictionary. |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
187 |
void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
6186fcfeb5ae
8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
jmasa
parents:
25485
diff
changeset
|
188 |
|
1 | 189 |
// Allocation helper functions |
190 |
// Allocate using a strategy that takes from the indexed free lists |
|
191 |
// first. This allocation strategy assumes a companion sweeping |
|
192 |
// strategy that attempts to keep the needed number of chunks in each |
|
193 |
// indexed free lists. |
|
194 |
HeapWord* allocate_adaptive_freelists(size_t size); |
|
195 |
// Allocate from the linear allocation buffers first. This allocation |
|
196 |
// strategy assumes maximal coalescing can maintain chunks large enough |
|
197 |
// to be used as linear allocation buffers. |
|
198 |
HeapWord* allocate_non_adaptive_freelists(size_t size); |
|
199 |
||
200 |
// Gets a chunk from the linear allocation block (LinAB). If there |
|
201 |
// is not enough space in the LinAB, refills it. |
|
202 |
HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size); |
|
203 |
HeapWord* getChunkFromSmallLinearAllocBlock(size_t size); |
|
204 |
// Get a chunk from the space remaining in the linear allocation block. Do |
|
205 |
// not attempt to refill if the space is not available, return NULL. Do the |
|
206 |
// repairs on the linear allocation block as appropriate. |
|
207 |
HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size); |
|
208 |
inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size); |
|
209 |
||
210 |
// Helper function for getChunkFromIndexedFreeList. |
|
211 |
// Replenish the indexed free list for this "size". Do not take from an |
|
212 |
// underpopulated size. |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
213 |
FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true); |
1 | 214 |
|
215 |
// Get a chunk from the indexed free list. If the indexed free list |
|
216 |
// does not have a free chunk, try to replenish the indexed free list |
|
217 |
// then get the free chunk from the replenished indexed free list. |
|
218 |
inline FreeChunk* getChunkFromIndexedFreeList(size_t size); |
|
219 |
||
220 |
// The returned chunk may be larger than requested (or null). |
|
221 |
FreeChunk* getChunkFromDictionary(size_t size); |
|
222 |
// The returned chunk is the exact size requested (or null). |
|
223 |
FreeChunk* getChunkFromDictionaryExact(size_t size); |
|
224 |
||
225 |
// Find a chunk in the indexed free list that is the best |
|
226 |
// fit for size "numWords". |
|
227 |
FreeChunk* bestFitSmall(size_t numWords); |
|
228 |
// For free list "fl" of chunks of size > numWords, |
|
229 |
// remove a chunk, split off a chunk of size numWords |
|
230 |
// and return it. The split off remainder is returned to |
|
231 |
// the free lists. The old name for getFromListGreater |
|
232 |
// was lookInListGreater. |
|
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
233 |
FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords); |
1 | 234 |
// Get a chunk in the indexed free list or dictionary, |
235 |
// by considering a larger chunk and splitting it. |
|
236 |
FreeChunk* getChunkFromGreater(size_t numWords); |
|
237 |
// Verify that the given chunk is in the indexed free lists. |
|
238 |
bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const; |
|
239 |
// Remove the specified chunk from the indexed free lists. |
|
240 |
void removeChunkFromIndexedFreeList(FreeChunk* fc); |
|
241 |
// Remove the specified chunk from the dictionary. |
|
242 |
void removeChunkFromDictionary(FreeChunk* fc); |
|
243 |
// Split a free chunk into a smaller free chunk of size "new_size". |
|
244 |
// Return the smaller free chunk and return the remainder to the |
|
245 |
// free lists. |
|
246 |
FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size); |
|
247 |
// Add a chunk to the free lists. |
|
248 |
void addChunkToFreeLists(HeapWord* chunk, size_t size); |
|
249 |
// Add a chunk to the free lists, preferring to suffix it |
|
250 |
// to the last free chunk at end of space if possible, and |
|
251 |
// updating the block census stats as well as block offset table. |
|
252 |
// Take any locks as appropriate if we are multithreaded. |
|
253 |
void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size); |
|
254 |
// Add a free chunk to the indexed free lists. |
|
255 |
void returnChunkToFreeList(FreeChunk* chunk); |
|
256 |
// Add a free chunk to the dictionary. |
|
257 |
void returnChunkToDictionary(FreeChunk* chunk); |
|
258 |
||
259 |
// Functions for maintaining the linear allocation buffers (LinAB). |
|
260 |
// Repairing a linear allocation block refers to operations |
|
261 |
// performed on the remainder of a LinAB after an allocation |
|
262 |
// has been made from it. |
|
263 |
void repairLinearAllocationBlocks(); |
|
264 |
void repairLinearAllocBlock(LinearAllocBlock* blk); |
|
265 |
void refillLinearAllocBlock(LinearAllocBlock* blk); |
|
266 |
void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk); |
|
267 |
void refillLinearAllocBlocksIfNeeded(); |
|
268 |
||
269 |
void verify_objects_initialized() const; |
|
270 |
||
271 |
// Statistics reporting helper functions |
|
272 |
void reportFreeListStatistics() const; |
|
273 |
void reportIndexedFreeListStatistics() const; |
|
274 |
size_t maxChunkSizeInIndexedFreeLists() const; |
|
275 |
size_t numFreeBlocksInIndexedFreeLists() const; |
|
276 |
// Accessor |
|
277 |
// Accessor: the space's current "unallocated block" boundary.  When the
// block offset table tracks it (BlockOffsetArrayUseUnallocatedBlock), return
// the table's value, asserting it lies within [bottom(), end()]; otherwise
// conservatively return end().
HeapWord* unallocated_block() const {
  if (BlockOffsetArrayUseUnallocatedBlock) {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  } else {
    return end();
  }
}
287 |
// Notify the block offset table that the range [start, start + size) has
// been freed, keeping its bookkeeping in sync with the free lists.
void freed(HeapWord* start, size_t size) {
  _bt.freed(start, size);
}
|
290 |
||
291 |
protected: |
|
22551 | 292 |
// Reset the indexed free list to its initial empty condition. |
1 | 293 |
void resetIndexedFreeListArray(); |
22551 | 294 |
// Reset to an initial state with a single free block described |
1 | 295 |
// by the MemRegion parameter. |
296 |
void reset(MemRegion mr); |
|
297 |
// Return the total number of words in the indexed free lists. |
|
298 |
size_t totalSizeInIndexedFreeLists() const; |
|
299 |
||
300 |
public: |
|
22551 | 301 |
// Constructor |
1 | 302 |
CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr, |
303 |
bool use_adaptive_freelists, |
|
12507 | 304 |
FreeBlockDictionary<FreeChunk>::DictionaryChoice); |
22551 | 305 |
// Accessors |
1 | 306 |
bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } |
12507 | 307 |
FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; } |
1 | 308 |
HeapWord* nearLargestChunk() const { return _nearLargestChunk; } |
309 |
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } |
|
310 |
||
22551 | 311 |
// Set CMS global values. |
5694
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
312 |
static void set_cms_values(); |
1e0532a6abff
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
5434
diff
changeset
|
313 |
|
1 | 314 |
// Return the free chunk at the end of the space. If no such |
315 |
// chunk exists, return NULL. |
|
316 |
FreeChunk* find_chunk_at_end(); |
|
317 |
||
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
318 |
bool adaptive_freelists() const { return _adaptive_freelists; } |
1 | 319 |
|
320 |
void set_collector(CMSCollector* collector) { _collector = collector; } |
|
321 |
||
22551 | 322 |
// Support for parallelization of rescan and marking. |
1 | 323 |
const size_t rescan_task_size() const { return _rescan_task_size; } |
324 |
const size_t marking_task_size() const { return _marking_task_size; } |
|
325 |
SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } |
|
326 |
void initialize_sequential_subtasks_for_rescan(int n_threads); |
|
327 |
void initialize_sequential_subtasks_for_marking(int n_threads, |
|
328 |
HeapWord* low = NULL); |
|
329 |
||
330 |
// Space enquiries |
|
331 |
size_t used() const; |
|
332 |
size_t free() const; |
|
333 |
size_t max_alloc_in_words() const; |
|
334 |
// XXX: should have a less conservative used_region() than that of |
|
335 |
// Space; we could consider keeping track of highest allocated |
|
336 |
// address and correcting that at each sweep, as the sweeper |
|
337 |
// goes through the entire allocated part of the generation. We |
|
338 |
// could also use that information to keep the sweeper from |
|
339 |
// sweeping more than is necessary. The allocator and sweeper will |
|
340 |
// of course need to synchronize on this, since the sweeper will |
|
341 |
// try to bump down the address and the allocator will try to bump it up. |
|
342 |
// For now, however, we'll just use the default used_region() |
|
343 |
// which overestimates the region by returning the entire |
|
344 |
// committed region (this is safe, but inefficient). |
|
345 |
||
346 |
// Returns a subregion of the space containing all the objects in |
|
347 |
// the space. |
|
348 |
// Returns a subregion of the space containing all the objects in the space:
// up to unallocated_block() when the offset table tracks it, otherwise the
// (conservative) full committed region ending at end().
MemRegion used_region() const {
  return MemRegion(bottom(),
                   BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end());
}
|
353 |
||
354 |
virtual bool is_free_block(const HeapWord* p) const; |
|
355 |
||
356 |
// Resizing support |
|
357 |
void set_end(HeapWord* value); // override |
|
358 |
||
22551 | 359 |
// Mutual exclusion support |
1 | 360 |
Mutex* freelistLock() const { return &_freelistLock; } |
361 |
||
362 |
// Iteration support |
|
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
363 |
void oop_iterate(ExtendedOopClosure* cl); |
1 | 364 |
|
365 |
void object_iterate(ObjectClosure* blk); |
|
1893
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
366 |
// Apply the closure to each object in the space whose references |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
367 |
// point to objects in the heap. The usage of CompactibleFreeListSpace |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
368 |
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
369 |
// objects in the space with references to objects that are no longer |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
370 |
// valid. For example, an object may reference another object |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
371 |
// that has already been sweep up (collected). This method uses |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
372 |
// obj_is_alive() to determine whether it is safe to iterate of |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
373 |
// an object. |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
1388
diff
changeset
|
374 |
void safe_object_iterate(ObjectClosure* blk); |
23535
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
375 |
|
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
376 |
// Iterate over all objects that intersect with mr, calling "cl->do_object" |
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
377 |
// on each. There is an exception to this: if this closure has already |
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
378 |
// been invoked on an object, it may skip such objects in some cases. This is |
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
379 |
// Most likely to happen in an "upwards" (ascending address) iteration of |
6306204d22a9
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
mgerdin
parents:
23508
diff
changeset
|
380 |
// MemRegions. |
1 | 381 |
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
382 |
||
383 |
// Requires that "mr" be entirely within the space. |
|
384 |
// Apply "cl->do_object" to all objects that intersect with "mr". |
|
385 |
// If the iteration encounters an unparseable portion of the region, |
|
386 |
// terminate the iteration and return the address of the start of the |
|
387 |
// subregion that isn't done. Return of "NULL" indicates that the |
|
22551 | 388 |
// iteration completed. |
23536
5ad11152daa9
8038412: Move object_iterate_careful down from Space to ContigousSpace and CFLSpace
mgerdin
parents:
23535
diff
changeset
|
389 |
HeapWord* object_iterate_careful_m(MemRegion mr, |
5ad11152daa9
8038412: Move object_iterate_careful down from Space to ContigousSpace and CFLSpace
mgerdin
parents:
23535
diff
changeset
|
390 |
ObjectClosureCareful* cl); |
1 | 391 |
|
392 |
// Override: provides a DCTO_CL specific to this kind of space. |
|
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
393 |
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, |
1 | 394 |
CardTableModRefBS::PrecisionStyle precision, |
395 |
HeapWord* boundary); |
|
396 |
||
397 |
void blk_iterate(BlkClosure* cl); |
|
398 |
void blk_iterate_careful(BlkClosureCareful* cl); |
|
1374
4c24294029a9
6711316: Open source the Garbage-First garbage collector
ysr
parents:
360
diff
changeset
|
399 |
HeapWord* block_start_const(const void* p) const; |
1 | 400 |
HeapWord* block_start_careful(const void* p) const; |
401 |
size_t block_size(const HeapWord* p) const; |
|
402 |
size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const; |
|
403 |
bool block_is_obj(const HeapWord* p) const; |
|
404 |
bool obj_is_alive(const HeapWord* p) const; |
|
405 |
size_t block_size_nopar(const HeapWord* p) const; |
|
406 |
bool block_is_obj_nopar(const HeapWord* p) const; |
|
407 |
||
22551 | 408 |
// Iteration support for promotion |
1 | 409 |
void save_marks(); |
410 |
bool no_allocs_since_save_marks(); |
|
411 |
||
22551 | 412 |
// Iteration support for sweeping |
1 | 413 |
void save_sweep_limit() { |
414 |
_sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? |
|
415 |
unallocated_block() : end(); |
|
9969
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
416 |
if (CMSTraceSweeper) { |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
417 |
gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT |
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
418 |
" for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<", |
24424
2658d7834c6e
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
23537
diff
changeset
|
419 |
p2i(_sweep_limit), p2i(bottom()), p2i(end())); |
9969
57932d7294a9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
7397
diff
changeset
|
420 |
} |
1 | 421 |
} |
422 |
NOT_PRODUCT( |
|
423 |
void clear_sweep_limit() { _sweep_limit = NULL; } |
|
424 |
) |
|
425 |
HeapWord* sweep_limit() { return _sweep_limit; } |
|
426 |
||
427 |
// Apply "blk->do_oop" to the addresses of all reference fields in objects |
|
428 |
// promoted into this generation since the most recent save_marks() call. |
|
429 |
// Fields in objects allocated by applications of the closure |
|
430 |
// *are* included in the iteration. Thus, when the iteration completes |
|
431 |
// there should be no further such objects remaining. |
|
432 |
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
|
433 |
void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); |
|
434 |
ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL) |
|
435 |
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL |
|
436 |
||
437 |
// Allocation support |
|
438 |
HeapWord* allocate(size_t size); |
|
439 |
HeapWord* par_allocate(size_t size); |
|
440 |
||
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
185
diff
changeset
|
441 |
oop promote(oop obj, size_t obj_size); |
1 | 442 |
void gc_prologue(); |
443 |
void gc_epilogue(); |
|
444 |
||
445 |
// This call is used by a containing CMS generation / collector |
|
446 |
// to inform the CFLS space that a sweep has been completed |
|
447 |
// and that the space can do any related house-keeping functions. |
|
448 |
void sweep_completed(); |
|
449 |
||
450 |
// For an object in this space, the mark-word's two |
|
451 |
// LSB's having the value [11] indicates that it has been |
|
452 |
// promoted since the most recent call to save_marks() on |
|
453 |
// this generation and has not subsequently been iterated |
|
454 |
// over (using oop_since_save_marks_iterate() above). |
|
5434 | 455 |
// This property holds only for single-threaded collections, |
456 |
// and is typically used for Cheney scans; for MT scavenges, |
|
457 |
// the property holds for all objects promoted during that |
|
458 |
// scavenge for the duration of the scavenge and is used |
|
459 |
// by card-scanning to avoid scanning objects (being) promoted |
|
460 |
// during that scavenge. |
|
1 | 461 |
bool obj_allocated_since_save_marks(const oop obj) const { |
462 |
assert(is_in_reserved(obj), "Wrong space?"); |
|
463 |
return ((PromotedObject*)obj)->hasPromotedMark(); |
|
464 |
} |
|
465 |
||
466 |
// A worst-case estimate of the space required (in HeapWords) to expand the |
|
467 |
// heap when promoting an obj of size obj_size. |
|
468 |
size_t expansionSpaceRequired(size_t obj_size) const; |
|
469 |
||
470 |
FreeChunk* allocateScratch(size_t size); |
|
471 |
||
22551 | 472 |
// Returns true if either the small or large linear allocation buffer is empty. |
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
473 |
bool linearAllocationWouldFail() const; |
1 | 474 |
|
475 |
// Adjust the chunk for the minimum size. This version is called in |
|
476 |
// most cases in CompactibleFreeListSpace methods. |
|
477 |
inline static size_t adjustObjectSize(size_t size) { |
|
478 |
return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize)); |
|
479 |
} |
|
480 |
// This is a virtual version of adjustObjectSize() that is called |
|
481 |
// only occasionally when the compaction space changes and the type |
|
482 |
// of the new compaction space is is only known to be CompactibleSpace. |
|
483 |
size_t adjust_object_size_v(size_t size) const { |
|
484 |
return adjustObjectSize(size); |
|
485 |
} |
|
486 |
// Minimum size of a free block. |
|
487 |
virtual size_t minimum_free_block_size() const { return MinChunkSize; } |
|
488 |
void removeFreeChunkFromFreeLists(FreeChunk* chunk); |
|
489 |
void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, |
|
490 |
bool coalesced); |
|
491 |
||
22551 | 492 |
// Support for decisions regarding concurrent collection policy. |
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
493 |
bool should_concurrent_collect() const; |
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
494 |
|
22551 | 495 |
// Support for compaction. |
1 | 496 |
void prepare_for_compaction(CompactPoint* cp); |
497 |
void adjust_pointers(); |
|
498 |
void compact(); |
|
22551 | 499 |
// Reset the space to reflect the fact that a compaction of the |
1 | 500 |
// space has been done. |
501 |
virtual void reset_after_compaction(); |
|
502 |
||
22551 | 503 |
// Debugging support. |
1 | 504 |
void print() const; |
6258
68f252c6e825
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
5702
diff
changeset
|
505 |
void print_on(outputStream* st) const; |
1 | 506 |
void prepare_for_verify(); |
12379 | 507 |
void verify() const; |
1 | 508 |
void verifyFreeLists() const PRODUCT_RETURN; |
509 |
void verifyIndexedFreeLists() const; |
|
510 |
void verifyIndexedFreeList(size_t size) const; |
|
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
511 |
// Verify that the given chunk is in the free lists: |
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
512 |
// i.e. either the binary tree dictionary, the indexed free lists |
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
513 |
// or the linear allocation block. |
12509
6228e2085074
7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*
jmasa
parents:
12507
diff
changeset
|
514 |
bool verify_chunk_in_free_list(FreeChunk* fc) const; |
22551 | 515 |
// Verify that the given chunk is the linear allocation block. |
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
516 |
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; |
1 | 517 |
// Do some basic checks on the the free lists. |
10771
68e4b84cfa28
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
ysr
parents:
9969
diff
changeset
|
518 |
void check_free_list_consistency() const PRODUCT_RETURN; |
1 | 519 |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
520 |
// Printing support |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
521 |
void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st); |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
522 |
void print_indexed_free_lists(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
523 |
void print_dictionary_free_lists(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
524 |
void print_promo_info_blocks(outputStream* st) const; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
525 |
|
1 | 526 |
NOT_PRODUCT ( |
527 |
void initializeIndexedFreeListArrayReturnedBytes(); |
|
528 |
size_t sumIndexedFreeListArrayReturnedBytes(); |
|
529 |
// Return the total number of chunks in the indexed free lists. |
|
530 |
size_t totalCountInIndexedFreeLists() const; |
|
22551 | 531 |
// Return the total number of chunks in the space. |
1 | 532 |
size_t totalCount(); |
533 |
) |
|
534 |
||
535 |
// The census consists of counts of the quantities such as |
|
536 |
// the current count of the free chunks, number of chunks |
|
537 |
// created as a result of the split of a larger chunk or |
|
538 |
// coalescing of smaller chucks, etc. The counts in the |
|
539 |
// census is used to make decisions on splitting and |
|
540 |
// coalescing of chunks during the sweep of garbage. |
|
541 |
||
542 |
// Print the statistics for the free lists. |
|
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
543 |
void printFLCensus(size_t sweep_count) const; |
1 | 544 |
|
545 |
// Statistics functions |
|
546 |
// Initialize census for lists before the sweep. |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
547 |
void beginSweepFLCensus(float inter_sweep_current, |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
548 |
float inter_sweep_estimate, |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
549 |
float intra_sweep_estimate); |
1 | 550 |
// Set the surplus for each of the free lists. |
551 |
void setFLSurplus(); |
|
552 |
// Set the hint for each of the free lists. |
|
553 |
void setFLHints(); |
|
554 |
// Clear the census for each of the free lists. |
|
555 |
void clearFLCensus(); |
|
556 |
// Perform functions for the census after the end of the sweep. |
|
185
cda2a1eb4be5
6668743: CMS: Consolidate block statistics reporting code
ysr
parents:
1
diff
changeset
|
557 |
void endSweepFLCensus(size_t sweep_count); |
1 | 558 |
// Return true if the count of free chunks is greater |
559 |
// than the desired number of free chunks. |
|
560 |
bool coalOverPopulated(size_t size); |
|
561 |
||
562 |
// Record (for each size): |
|
563 |
// |
|
564 |
// split-births = #chunks added due to splits in (prev-sweep-end, |
|
565 |
// this-sweep-start) |
|
566 |
// split-deaths = #chunks removed for splits in (prev-sweep-end, |
|
567 |
// this-sweep-start) |
|
568 |
// num-curr = #chunks at start of this sweep |
|
569 |
// num-prev = #chunks at end of previous sweep |
|
570 |
// |
|
571 |
// The above are quantities that are measured. Now define: |
|
572 |
// |
|
573 |
// num-desired := num-prev + split-births - split-deaths - num-curr |
|
574 |
// |
|
575 |
// Roughly, num-prev + split-births is the supply, |
|
576 |
// split-deaths is demand due to other sizes |
|
577 |
// and num-curr is what we have left. |
|
578 |
// |
|
579 |
// Thus, num-desired is roughly speaking the "legitimate demand" |
|
580 |
// for blocks of this size and what we are striving to reach at the |
|
581 |
// end of the current sweep. |
|
582 |
// |
|
583 |
// For a given list, let num-len be its current population. |
|
584 |
// Define, for a free list of a given size: |
|
585 |
// |
|
586 |
// coal-overpopulated := num-len >= num-desired * coal-surplus |
|
587 |
// (coal-surplus is set to 1.05, i.e. we allow a little slop when |
|
588 |
// coalescing -- we do not coalesce unless we think that the current |
|
589 |
// supply has exceeded the estimated demand by more than 5%). |
|
590 |
// |
|
591 |
// For the set of sizes in the binary tree, which is neither dense nor |
|
592 |
// closed, it may be the case that for a particular size we have never |
|
593 |
// had, or do not now have, or did not have at the previous sweep, |
|
594 |
// chunks of that size. We need to extend the definition of |
|
595 |
// coal-overpopulated to such sizes as well: |
|
596 |
// |
|
597 |
// For a chunk in/not in the binary tree, extend coal-overpopulated |
|
598 |
// defined above to include all sizes as follows: |
|
599 |
// |
|
600 |
// . a size that is non-existent is coal-overpopulated |
|
601 |
// . a size that has a num-desired <= 0 as defined above is |
|
602 |
// coal-overpopulated. |
|
603 |
// |
|
604 |
// Also define, for a chunk heap-offset C and mountain heap-offset M: |
|
605 |
// |
|
606 |
// close-to-mountain := C >= 0.99 * M |
|
607 |
// |
|
608 |
// Now, the coalescing strategy is: |
|
609 |
// |
|
610 |
// Coalesce left-hand chunk with right-hand chunk if and |
|
611 |
// only if: |
|
612 |
// |
|
613 |
// EITHER |
|
614 |
// . left-hand chunk is of a size that is coal-overpopulated |
|
615 |
// OR |
|
616 |
// . right-hand chunk is close-to-mountain |
|
617 |
void smallCoalBirth(size_t size); |
|
618 |
void smallCoalDeath(size_t size); |
|
619 |
void coalBirth(size_t size); |
|
620 |
void coalDeath(size_t size); |
|
621 |
void smallSplitBirth(size_t size); |
|
622 |
void smallSplitDeath(size_t size); |
|
12509
6228e2085074
7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*
jmasa
parents:
12507
diff
changeset
|
623 |
void split_birth(size_t size); |
1 | 624 |
void splitDeath(size_t size); |
625 |
void split(size_t from, size_t to1); |
|
626 |
||
627 |
double flsFrag() const; |
|
628 |
}; |
|
629 |
||
630 |
// A parallel-GC-thread-local allocation buffer for allocation into a |
|
631 |
// CompactibleFreeListSpace. |
|
13195 | 632 |
class CFLS_LAB : public CHeapObj<mtGC> { |
1 | 633 |
// The space that this buffer allocates into. |
634 |
CompactibleFreeListSpace* _cfls; |
|
635 |
||
636 |
// Our local free lists. |
|
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
637 |
AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize]; |
1 | 638 |
|
639 |
// Initialized from a command-line arg. |
|
640 |
||
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
641 |
// Allocation statistics in support of dynamic adjustment of |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
642 |
// #blocks to claim per get_from_global_pool() call below. |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
643 |
static AdaptiveWeightedAverage |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
644 |
_blocks_to_claim [CompactibleFreeListSpace::IndexSetSize]; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
645 |
static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
11396
917d8673b5ef
7121618: Change type of number of GC workers to unsigned int.
jmasa
parents:
11247
diff
changeset
|
646 |
static uint _global_num_workers[CompactibleFreeListSpace::IndexSetSize]; |
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
647 |
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
648 |
|
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
649 |
// Internal work method |
14123
944e56f74fba
7045397: NPG: Add freelists to class loader arenas.
jmasa
parents:
13728
diff
changeset
|
650 |
void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl); |
1 | 651 |
|
652 |
public: |
|
653 |
CFLS_LAB(CompactibleFreeListSpace* cfls); |
|
654 |
||
655 |
// Allocate and return a block of the given size, or else return NULL. |
|
656 |
HeapWord* alloc(size_t word_sz); |
|
657 |
||
658 |
// Return any unused portions of the buffer to the global pool. |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
659 |
void retire(int tid); |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
660 |
|
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
661 |
// Dynamic OldPLABSize sizing |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
662 |
static void compute_desired_plab_size(); |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
663 |
// When the settings are modified from default static initialization |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
664 |
static void modify_initialization(size_t n, unsigned wt); |
1 | 665 |
}; |
666 |
||
667 |
size_t PromotionInfo::refillSize() const { |
|
668 |
const size_t CMSSpoolBlockSize = 256; |
|
669 |
const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop) |
|
670 |
* CMSSpoolBlockSize); |
|
671 |
return CompactibleFreeListSpace::adjustObjectSize(sz); |
|
672 |
} |
|
7397 | 673 |
|
674 |
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP |