/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP

#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CMSCollector;
class CompactibleFreeListSpace;
class ConcurrentMarkSweepGeneration;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 public:
  AFLBinaryTreeDictionary(MemRegion mr)
      : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {}

  // Find the list with size "size" in the binary tree and update
  // the statistics in the list according to "split" (chunk was
  // split or coalesced) and "birth" (chunk was added or removed).
  void dict_census_update(size_t size, bool split, bool birth);
  // Return true if the dictionary is overpopulated (more chunks of
  // this size than desired) for size "size".
  bool coal_dict_over_populated(size_t size);
  // Methods called at the beginning of a sweep to prepare the
  // statistics for the sweep.
  void begin_sweep_dict_census(double coalSurplusPercent,
                               float inter_sweep_current,
                               float inter_sweep_estimate,
                               float intra_sweep_estimate);
  // Methods called after the end of a sweep to modify the
  // statistics for the sweep.
  void end_sweep_dict_census(double splitSurplusPercent);
  // Accessors for statistics
  void set_tree_surplus(double splitSurplusPercent);
  void set_tree_hints(void);
  // Reset statistics for all the lists in the tree.
  void clear_tree_census(void);
  // Print the statistics for all the lists in the tree. Also may
  // print out summaries.
  void print_dict_census(outputStream* st) const;
};
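
// Editor's illustration (not part of the original header): the per-sweep census
// protocol implied by the declarations above. The helper name and the numeric
// arguments are placeholders; the real values come from the CMS sweep policy code.
inline void example_dict_sweep_census(AFLBinaryTreeDictionary* dict) {
  // Prepare the per-size statistics before the sweep starts.
  dict->begin_sweep_dict_census(1.05 /* coalSurplusPercent */,
                                0.0f /* inter_sweep_current */,
                                0.0f /* inter_sweep_estimate */,
                                0.0f /* intra_sweep_estimate */);
  // During the sweep, record a birth (or death) for a size whenever a chunk of
  // that size is produced (or consumed) by a split or a coalesce.
  dict->dict_census_update(64 /* size */, true /* split */, true /* birth */);
  // Fold the results into the statistics once the sweep is over.
  dict->end_sweep_dict_census(1.10 /* splitSurplusPercent */);
}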

class LinearAllocBlock {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
                       _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
           size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};
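
// Editor's illustration (not part of the original header): a minimal sketch of how
// a chunk might be carved out of a LinearAllocBlock, assuming the usual bump-pointer
// discipline. The real logic, including refills and block offset table maintenance,
// lives in CompactibleFreeListSpace::getChunkFromLinearAllocBlock(); the helper name
// here is made up.
inline HeapWord* example_carve_from_linab(LinearAllocBlock* blk, size_t word_size) {
  if (blk->_word_size < word_size) {
    return NULL;                 // Not enough room left; the caller would refill the block.
  }
  HeapWord* res = blk->_ptr;     // Hand out the front of the block...
  blk->_ptr       += word_size;  // ...bump the cursor past it...
  blk->_word_size -= word_size;  // ...and shrink the remaining free size.
  return res;
}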

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CompactibleFreeListSpaceLAB;
  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _old_gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this then use _sLAB
    SmallForDictionary  = 257,                // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
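  // Editor's note (an illustration based on the comments above, not code from this
  // file): the constants give the size classes consulted when servicing a request,
  //
  //   size <  SmallForLinearAlloc  -> eligible for the small linear allocation block
  //   size <  SmallForDictionary   -> served from _indexedFreeList[size]
  //   size >= SmallForDictionary   -> served from the _dictionary of large blocks
  //
  // with the exact policy implemented in allocate_adaptive_freelists() and its helpers.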
  static size_t IndexSetStart;
  static size_t IndexSetStride;
  static size_t _min_chunk_size_in_bytes;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;

  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  AFLBinaryTreeDictionary* _dictionary;  // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;  // Use best fit strategy

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Stable value of used().
  size_t _used_stable;

  // Used to make the young collector update the mod union table
  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".) If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from global
  // list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks. Returns that
  // evenly splittable chunk. May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block. Do
  // not attempt to refill if the space is not available, return NULL. Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics(const char* title) const;
  void reportIndexedFreeListStatistics(outputStream* st) const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return end();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
  }

  inline size_t adjust_obj_size(size_t size) const {
    return adjustObjectSize(size);
  }

  inline size_t obj_size(const HeapWord* addr) const;

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size() const { return _rescan_task_size; }
  const size_t marking_task_size() const { return _marking_task_size; }
  // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
  const size_t max_flag_size_for_task_size() const;
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }

  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns monotonically increasing stable used space bytes for CMS.
  // This is required for jstat and other memory monitoring tools
  // that might otherwise see inconsistent used space values during a garbage
  // collection, promotion or allocation into compactibleFreeListSpace.
  // The value returned by this function might be smaller than the
  // actual value.
  size_t used_stable() const;
  // Recalculate and cache the current stable used() value. Only to be called
  // in places where we can be sure that the result is stable.
  void recalculate_used_stable();

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(OopIterateClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space with references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. Return of "NULL" indicates that the
  // iteration completed.
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                     CardTable::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
                                 " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
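  // Worked example (editor's note, assuming MinChunkSize is 4 heap words): a
  // request for 2 words becomes MAX2(2, 4) = 4 and is then passed through
  // align_object_size(), so callers always see an object-aligned chunk of at
  // least MinChunkSize words.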
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print() const;
  void print_on(outputStream* st) const;
  void prepare_for_verify();
  void verify() const;
  void verifyFreeLists() const PRODUCT_RETURN;
  void verifyIndexedFreeLists() const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )
  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //                  this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //                  this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
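  //
  // Worked example (editor's note; the numbers are invented for illustration):
  // suppose that for some size the previous sweep ended with num-prev = 100 and
  // that since then split-births = 30, split-deaths = 50, and num-curr = 60. Then
  //
  //   num-desired = 100 + 30 - 50 - 60 = 20
  //
  // and, with coal-surplus = 1.05, the list of that size is coal-overpopulated
  // once num-len >= 20 * 1.05 = 21, so a left-hand chunk of that size would be
  // coalesced with its right-hand neighbor during the sweep.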
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

 public:
  static const int _default_dynamic_old_plab_size = 16;
  static const int _default_static_old_plab_size  = 50;

  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};
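
// Editor's illustration (not part of the original header): the expected life cycle
// of a CompactibleFreeListSpaceLAB from a GC worker's point of view, based only on
// the declarations above. The helper name and the fallback policy are hypothetical;
// the real call sites are in the CMS promotion code.
inline HeapWord* example_lab_promotion_alloc(CompactibleFreeListSpaceLAB* lab,
                                             size_t word_sz) {
  // Fast path: take a block from the worker-local free lists; the LAB refills
  // itself from the shared CompactibleFreeListSpace as needed.
  HeapWord* obj = lab->alloc(word_sz);
  // A NULL result would send the caller to a slower shared allocation path (not
  // shown). At the end of the scavenge the worker calls lab->retire(tid) to give
  // unused blocks back to the global pool.
  return obj;
}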

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP