author | brutisso |
Tue, 02 Dec 2014 09:51:16 +0100 | |
changeset 27903 | 14c6e2f23fa0 |
parent 27899 | 17754211a7ab |
child 27904 | d606512952cc |
permissions | -rw-r--r-- |
1 | 1 |
/* |
24424
2658d7834c6e
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
23508
diff
changeset
|
2 |
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
1 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
5547
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
4574
diff
changeset
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
4574
diff
changeset
|
20 |
* or visit www.oracle.com if you need additional information or have any |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
4574
diff
changeset
|
21 |
* questions. |
1 | 22 |
* |
23 |
*/ |
|
24 |
||
7397 | 25 |
#ifndef SHARE_VM_MEMORY_GENERATION_HPP |
26 |
#define SHARE_VM_MEMORY_GENERATION_HPP |
|
27 |
||
28 |
#include "gc_implementation/shared/collectorCounters.hpp" |
|
29 |
#include "memory/allocation.hpp" |
|
30 |
#include "memory/memRegion.hpp" |
|
31 |
#include "memory/referenceProcessor.hpp" |
|
32 |
#include "memory/universe.hpp" |
|
33 |
#include "memory/watermark.hpp" |
|
34 |
#include "runtime/mutex.hpp" |
|
35 |
#include "runtime/perfData.hpp" |
|
36 |
#include "runtime/virtualspace.hpp" |
|
37 |
||
1 | 38 |
// A Generation models a heap area for similarly-aged objects. |
39 |
// It will contain one ore more spaces holding the actual objects. |
|
40 |
// |
|
41 |
// The Generation class hierarchy: |
|
42 |
// |
|
43 |
// Generation - abstract base class |
|
44 |
// - DefNewGeneration - allocation area (copy collected) |
|
45 |
// - ParNewGeneration - a DefNewGeneration that is collected by |
|
46 |
// several threads |
|
47 |
// - CardGeneration - abstract class adding offset array behavior |
|
48 |
// - OneContigSpaceCardGeneration - abstract class holding a single |
|
49 |
// contiguous space with card marking |
|
50 |
// - TenuredGeneration - tenured (old object) space (markSweepCompact) |
|
51 |
// - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation |
|
52 |
// (Detlefs-Printezis refinement of |
|
53 |
// Boehm-Demers-Schenker) |
|
54 |
// |
|
55 |
// The system configurations currently allowed are: |
|
56 |
// |
|
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
57 |
// DefNewGeneration + TenuredGeneration |
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
58 |
// DefNewGeneration + ConcurrentMarkSweepGeneration |
1 | 59 |
// |
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
60 |
// ParNewGeneration + TenuredGeneration |
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
61 |
// ParNewGeneration + ConcurrentMarkSweepGeneration |
1 | 62 |
// |
63 |
||
64 |
class DefNewGeneration; |
|
65 |
class GenerationSpec; |
|
66 |
class CompactibleSpace; |
|
67 |
class ContiguousSpace; |
|
68 |
class CompactPoint; |
|
69 |
class OopsInGenClosure; |
|
70 |
class OopClosure; |
|
71 |
class ScanClosure; |
|
72 |
class FastScanClosure; |
|
73 |
class GenCollectedHeap; |
|
74 |
class GenRemSet; |
|
75 |
class GCStats; |
|
76 |
||
77 |
// A "ScratchBlock" represents a block of memory in one generation usable by |
|
78 |
// another. It represents "num_words" free words, starting at and including |
|
79 |
// the address of "this". |
|
80 |
struct ScratchBlock { |
|
81 |
ScratchBlock* next; |
|
82 |
size_t num_words; |
|
83 |
HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming |
|
84 |
// first two fields are word-sized.) |
|
85 |
}; |
|
86 |
||
87 |
||
13195 | 88 |
class Generation: public CHeapObj<mtGC> { |
1 | 89 |
friend class VMStructs; |
90 |
private: |
|
91 |
jlong _time_of_last_gc; // time when last gc on this generation happened (ms) |
|
92 |
MemRegion _prev_used_region; // for collectors that want to "remember" a value for |
|
93 |
// used region at some specific point during collection. |
|
94 |
||
95 |
protected: |
|
96 |
// Minimum and maximum addresses for memory reserved (not necessarily |
|
97 |
// committed) for generation. |
|
98 |
// Used by card marking code. Must not overlap with address ranges of |
|
99 |
// other generations. |
|
100 |
MemRegion _reserved; |
|
101 |
||
102 |
// Memory area reserved for generation |
|
103 |
VirtualSpace _virtual_space; |
|
104 |
||
105 |
// Level in the generation hierarchy. |
|
106 |
int _level; |
|
107 |
||
108 |
// ("Weak") Reference processing support |
|
109 |
ReferenceProcessor* _ref_processor; |
|
110 |
||
111 |
// Performance Counters |
|
112 |
CollectorCounters* _gc_counters; |
|
113 |
||
114 |
// Statistics for garbage collection |
|
115 |
GCStats* _gc_stats; |
|
116 |
||
117 |
// Returns the next generation in the configuration, or else NULL if this |
|
118 |
// is the highest generation. |
|
119 |
Generation* next_gen() const; |
|
120 |
||
121 |
// Initialize the generation. |
|
122 |
Generation(ReservedSpace rs, size_t initial_byte_size, int level); |
|
123 |
||
124 |
// Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in |
|
125 |
// "sp" that point into younger generations. |
|
126 |
// The iteration is only over objects allocated at the start of the |
|
127 |
// iterations; objects allocated as a result of applying the closure are |
|
128 |
// not included. |
|
129 |
void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl); |
|
130 |
||
131 |
public: |
|
132 |
// The set of possible generation kinds. |
|
133 |
enum Name { |
|
134 |
DefNew, |
|
135 |
ParNew, |
|
136 |
MarkSweepCompact, |
|
137 |
ConcurrentMarkSweep, |
|
138 |
Other |
|
139 |
}; |
|
140 |
||
141 |
enum SomePublicConstants { |
|
142 |
// Generations are GenGrain-aligned and have size that are multiples of |
|
143 |
// GenGrain. |
|
6176
4d9030fe341f
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
5547
diff
changeset
|
144 |
// Note: on ARM we add 1 bit for card_table_base to be properly aligned |
4d9030fe341f
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
5547
diff
changeset
|
145 |
// (we expect its low byte to be zero - see implementation of post_barrier) |
4d9030fe341f
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
5547
diff
changeset
|
146 |
LogOfGenGrain = 16 ARM_ONLY(+1), |
1 | 147 |
GenGrain = 1 << LogOfGenGrain |
148 |
}; |
|
149 |
||
150 |
// allocate and initialize ("weak") refs processing support |
|
151 |
virtual void ref_processor_init(); |
|
152 |
// Install the ("weak") reference processor for this generation.
// May be called only once: a non-NULL _ref_processor is treated as
// an error, since silently replacing it would clobber collector state.
void set_ref_processor(ReferenceProcessor* rp) {
  assert(_ref_processor == NULL, "clobbering existing _ref_processor");
  _ref_processor = rp;
}
|
156 |
||
157 |
virtual Generation::Name kind() { return Generation::Other; } |
|
158 |
GenerationSpec* spec(); |
|
159 |
||
160 |
// This properly belongs in the collector, but for now this |
|
161 |
// will do. |
|
162 |
virtual bool refs_discovery_is_atomic() const { return true; } |
|
163 |
virtual bool refs_discovery_is_mt() const { return false; } |
|
164 |
||
165 |
// Space enquiries (results in bytes) |
|
166 |
virtual size_t capacity() const = 0; // The maximum number of object bytes the |
|
167 |
// generation can currently hold. |
|
168 |
virtual size_t used() const = 0; // The number of used bytes in the gen. |
|
169 |
virtual size_t free() const = 0; // The number of free bytes in the gen. |
|
170 |
||
171 |
// Support for java.lang.Runtime.maxMemory(); see CollectedHeap. |
|
172 |
// Returns the total number of bytes available in a generation |
|
173 |
// for the allocation of objects. |
|
174 |
virtual size_t max_capacity() const; |
|
175 |
||
176 |
// If this is a young generation, the maximum number of bytes that can be |
|
177 |
// allocated in this generation before a GC is triggered. |
|
178 |
virtual size_t capacity_before_gc() const { return 0; } |
|
179 |
||
180 |
// The largest number of contiguous free bytes in the generation, |
|
181 |
// including expansion (Assumes called at a safepoint.) |
|
182 |
virtual size_t contiguous_available() const = 0; |
|
183 |
// The largest number of contiguous free bytes in this or any higher generation. |
|
184 |
virtual size_t max_contiguous_available() const; |
|
185 |
||
6985
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
186 |
// Returns true if promotions of the specified amount are |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
187 |
// likely to succeed without a promotion failure. |
1 | 188 |
// Promotion of the full amount is not guaranteed but |
6985
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
189 |
// might be attempted in the worst case. |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
190 |
virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const; |
1 | 191 |
|
4574
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
192 |
// For a non-young generation, this interface can be used to inform a |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
193 |
// generation that a promotion attempt into that generation failed. |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
194 |
// Typically used to enable diagnostic output for post-mortem analysis, |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
195 |
// but other uses of the interface are not ruled out. |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
196 |
virtual void promotion_failure_occurred() { /* does nothing */ } |
b2d5b0975515
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
2105
diff
changeset
|
197 |
|
1 | 198 |
// Return an estimate of the maximum allocation that could be performed |
199 |
// in the generation without triggering any collection or expansion |
|
200 |
// activity. It is "unsafe" because no locks are taken; the result |
|
201 |
// should be treated as an approximation, not a guarantee, for use in |
|
202 |
// heuristic resizing decisions. |
|
203 |
virtual size_t unsafe_max_alloc_nogc() const = 0; |
|
204 |
||
205 |
// Returns true if this generation cannot be expanded further |
|
206 |
// without a GC. Override as appropriate. |
|
207 |
virtual bool is_maximal_no_gc() const { |
|
208 |
return _virtual_space.uncommitted_size() == 0; |
|
209 |
} |
|
210 |
||
211 |
MemRegion reserved() const { return _reserved; } |
|
212 |
||
213 |
// Returns a region guaranteed to contain all the objects in the |
|
214 |
// generation. |
|
215 |
virtual MemRegion used_region() const { return _reserved; } |
|
216 |
||
217 |
MemRegion prev_used_region() const { return _prev_used_region; } |
|
218 |
virtual void save_used_region() { _prev_used_region = used_region(); } |
|
219 |
||
11247 | 220 |
// Returns "TRUE" iff "p" points into the committed areas in the generation. |
1 | 221 |
// For some kinds of generations, this may be an expensive operation. |
222 |
// To avoid performance problems stemming from its inadvertent use in |
|
223 |
// product jvm's, we restrict its use to assertion checking or |
|
224 |
// verification only. |
|
225 |
virtual bool is_in(const void* p) const; |
|
226 |
||
227 |
/* Returns "TRUE" iff "p" points into the reserved area of the generation. */ |
|
228 |
bool is_in_reserved(const void* p) const { |
|
229 |
return _reserved.contains(p); |
|
230 |
} |
|
231 |
||
232 |
// If some space in the generation contains the given "addr", return a |
|
233 |
// pointer to that space, else return "NULL". |
|
234 |
virtual Space* space_containing(const void* addr) const; |
|
235 |
||
236 |
// Iteration - do not use for time critical operations |
|
237 |
virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0; |
|
238 |
||
239 |
// Returns the first space, if any, in the generation that can participate |
|
240 |
// in compaction, or else "NULL". |
|
241 |
virtual CompactibleSpace* first_compaction_space() const = 0; |
|
242 |
||
243 |
// Returns "true" iff this generation should be used to allocate an |
|
244 |
// object of the given size. Young generations might |
|
245 |
// wish to exclude very large objects, for example, since, if allocated |
|
246 |
// often, they would greatly increase the frequency of young-gen |
|
247 |
// collection. |
|
248 |
virtual bool should_allocate(size_t word_size, bool is_tlab) { |
|
249 |
bool result = false; |
|
250 |
size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize); |
|
251 |
if (!is_tlab || supports_tlab_allocation()) { |
|
252 |
result = (word_size > 0) && (word_size < overflow_limit); |
|
253 |
} |
|
254 |
return result; |
|
255 |
} |
|
256 |
||
257 |
// Allocate and returns a block of the requested size, or returns "NULL". |
|
258 |
// Assumes the caller has done any necessary locking. |
|
259 |
virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0; |
|
260 |
||
261 |
// Like "allocate", but performs any necessary locking internally. |
|
262 |
virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0; |
|
263 |
||
264 |
// Some generation may offer a region for shared, contiguous allocation, |
|
265 |
// via inlined code (by exporting the address of the top and end fields |
|
266 |
// defining the extent of the contiguous allocation region.) |
|
267 |
||
268 |
// This function returns "true" iff the heap supports this kind of |
|
269 |
// allocation. (More precisely, this means the style of allocation that |
|
270 |
// increments *top_addr()" with a CAS.) (Default is "no".) |
|
271 |
// A generation that supports this allocation style must use lock-free |
|
272 |
// allocation for *all* allocation, since there are times when lock free |
|
273 |
// allocation will be concurrent with plain "allocate" calls. |
|
274 |
virtual bool supports_inline_contig_alloc() const { return false; } |
|
275 |
||
276 |
// These functions return the addresses of the fields that define the |
|
277 |
// boundaries of the contiguous allocation area. (These fields should be |
|
22551 | 278 |
// physically near to one another.) |
1 | 279 |
virtual HeapWord** top_addr() const { return NULL; } |
280 |
virtual HeapWord** end_addr() const { return NULL; } |
|
281 |
||
282 |
// Thread-local allocation buffers |
|
283 |
virtual bool supports_tlab_allocation() const { return false; } |
|
284 |
// TLAB capacity in bytes. Generations that support thread-local
// allocation buffers must override; the base class deliberately fails.
virtual size_t tlab_capacity() const {
  guarantee(false, "Generation doesn't support thread local allocation buffers");
  return 0;
}
|
22552 | 288 |
// Bytes currently used inside TLABs. Must be overridden by generations
// that support TLABs; the base class deliberately fails.
virtual size_t tlab_used() const {
  guarantee(false, "Generation doesn't support thread local allocation buffers");
  return 0;
}
|
1 | 292 |
// Upper bound (bytes) on a single TLAB allocation, computed without
// locking. Must be overridden by generations that support TLABs;
// the base class deliberately fails.
virtual size_t unsafe_max_tlab_alloc() const {
  guarantee(false, "Generation doesn't support thread local allocation buffers");
  return 0;
}
|
296 |
||
297 |
// "obj" is the address of an object in a younger generation. Allocate space |
|
298 |
// for "obj" in the current (or some higher) generation, and copy "obj" into |
|
299 |
// the newly allocated space, if possible, returning the result (or NULL if |
|
300 |
// the allocation failed). |
|
301 |
// |
|
302 |
// The "obj_size" argument is just obj->size(), passed along so the caller can |
|
303 |
// avoid repeating the virtual call to retrieve it. |
|
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
1
diff
changeset
|
304 |
virtual oop promote(oop obj, size_t obj_size); |
1 | 305 |
|
306 |
// Thread "thread_num" (0 <= i < ParalleGCThreads) wants to promote |
|
307 |
// object "obj", whose original mark word was "m", and whose size is |
|
308 |
// "word_sz". If possible, allocate space for "obj", copy obj into it |
|
309 |
// (taking care to copy "m" into the mark word when done, since the mark |
|
310 |
// word of "obj" may have been overwritten with a forwarding pointer, and |
|
311 |
// also taking care to copy the klass pointer *last*. Returns the new |
|
312 |
// object if successful, or else NULL. |
|
313 |
virtual oop par_promote(int thread_num, |
|
314 |
oop obj, markOop m, size_t word_sz); |
|
315 |
||
316 |
// Informs the current generation that all par_promote_alloc's in the |
|
317 |
// collection have been completed; any supporting data structures can be |
|
318 |
// reset. Default is to do nothing. |
|
319 |
virtual void par_promote_alloc_done(int thread_num) {} |
|
320 |
||
321 |
// Informs the current generation that all oop_since_save_marks_iterates |
|
322 |
// performed by "thread_num" in the current collection, if any, have been |
|
323 |
// completed; any supporting data structures can be reset. Default is to |
|
324 |
// do nothing. |
|
325 |
virtual void par_oop_since_save_marks_iterate_done(int thread_num) {} |
|
326 |
||
327 |
// This generation will collect all younger generations |
|
328 |
// during a full collection. |
|
329 |
virtual bool full_collects_younger_generations() const { return false; } |
|
330 |
||
331 |
// This generation does in-place marking, meaning that mark words |
|
332 |
// are mutated during the marking phase and presumably reinitialized |
|
333 |
// to a canonical value after the GC. This is currently used by the |
|
334 |
// biased locking implementation to determine whether additional |
|
335 |
// work is required during the GC prologue and epilogue. |
|
336 |
virtual bool performs_in_place_marking() const { return true; } |
|
337 |
||
338 |
// Returns "true" iff collect() should subsequently be called on this |
|
339 |
// this generation. See comment below. |
|
340 |
// This is a generic implementation which can be overridden. |
|
341 |
// |
|
342 |
// Note: in the current (1.4) implementation, when genCollectedHeap's |
|
343 |
// incremental_collection_will_fail flag is set, all allocations are |
|
344 |
// slow path (the only fast-path place to allocate is DefNew, which |
|
345 |
// will be full if the flag is set). |
|
346 |
// Thus, older generations which collect younger generations should |
|
347 |
// test this flag and collect if it is set. |
|
348 |
virtual bool should_collect(bool full, |
|
349 |
size_t word_size, |
|
350 |
bool is_tlab) { |
|
351 |
return (full || should_allocate(word_size, is_tlab)); |
|
352 |
} |
|
353 |
||
6985
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
354 |
// Returns true if the collection is likely to be safely |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
355 |
// completed. Even if this method returns true, a collection |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
356 |
// may not be guaranteed to succeed, and the system should be |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
357 |
// able to safely unwind and recover from that failure, albeit |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
358 |
// at some additional cost. |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
359 |
virtual bool collection_attempt_is_safe() { |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
360 |
guarantee(false, "Are you sure you want to call this method?"); |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
361 |
return true; |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
362 |
} |
e9364ec299ac
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
6176
diff
changeset
|
363 |
|
1 | 364 |
// Perform a garbage collection. |
365 |
// If full is true attempt a full garbage collection of this generation. |
|
366 |
// Otherwise, attempting to (at least) free enough space to support an |
|
367 |
// allocation of the given "word_size". |
|
368 |
virtual void collect(bool full, |
|
369 |
bool clear_all_soft_refs, |
|
370 |
size_t word_size, |
|
371 |
bool is_tlab) = 0; |
|
372 |
||
373 |
// Perform a heap collection, attempting to create (at least) enough |
|
374 |
// space to support an allocation of the given "word_size". If |
|
375 |
// successful, perform the allocation and return the resulting |
|
376 |
// "oop" (initializing the allocated block). If the allocation is |
|
377 |
// still unsuccessful, return "NULL". |
|
378 |
virtual HeapWord* expand_and_allocate(size_t word_size, |
|
379 |
bool is_tlab, |
|
380 |
bool parallel = false) = 0; |
|
381 |
||
382 |
// Some generations may require some cleanup or preparation actions before |
|
383 |
// allowing a collection. The default is to do nothing. |
|
384 |
virtual void gc_prologue(bool full) {}; |
|
385 |
||
386 |
// Some generations may require some cleanup actions after a collection. |
|
387 |
// The default is to do nothing. |
|
388 |
virtual void gc_epilogue(bool full) {}; |
|
389 |
||
971
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
390 |
// Save the high water marks for the used space in a generation. |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
391 |
virtual void record_spaces_top() {}; |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
392 |
|
1 | 393 |
// Some generations may need to be "fixed-up" after some allocation |
394 |
// activity to make them parsable again. The default is to do nothing. |
|
395 |
virtual void ensure_parsability() {}; |
|
396 |
||
397 |
// Time (in ms) when we were last collected or now if a collection is |
|
398 |
// in progress. |
|
399 |
virtual jlong time_of_last_gc(jlong now) { |
|
11251
e29da6b5622b
7117303: VM uses non-monotonic time source and complains that it is non-monotonic
johnc
parents:
11247
diff
changeset
|
400 |
// Both _time_of_last_gc and now are set using a time source |
e29da6b5622b
7117303: VM uses non-monotonic time source and complains that it is non-monotonic
johnc
parents:
11247
diff
changeset
|
401 |
// that guarantees monotonically non-decreasing values provided |
e29da6b5622b
7117303: VM uses non-monotonic time source and complains that it is non-monotonic
johnc
parents:
11247
diff
changeset
|
402 |
// the underlying platform provides such a source. So we still |
e29da6b5622b
7117303: VM uses non-monotonic time source and complains that it is non-monotonic
johnc
parents:
11247
diff
changeset
|
403 |
// have to guard against non-monotonicity. |
1 | 404 |
NOT_PRODUCT( |
405 |
if (now < _time_of_last_gc) { |
|
24424
2658d7834c6e
8037816: Fix for 8036122 breaks build with Xcode5/clang
drchase
parents:
23508
diff
changeset
|
406 |
warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, (int64_t)_time_of_last_gc, (int64_t)now); |
1 | 407 |
} |
408 |
) |
|
409 |
return _time_of_last_gc; |
|
410 |
} |
|
411 |
||
412 |
// Record the timestamp (ms) of the collection that just completed;
// read back by time_of_last_gc().
virtual void update_time_of_last_gc(jlong now) {
  _time_of_last_gc = now;
}
|
415 |
||
416 |
// Generations may keep statistics about collection. This |
|
417 |
// method updates those statistics. current_level is |
|
418 |
// the level of the collection that has most recently |
|
419 |
// occurred. This allows the generation to decide what |
|
420 |
// statistics are valid to collect. For example, the |
|
421 |
// generation can decide to gather the amount of promoted data |
|
422 |
// if the collection of the younger generations has completed. |
|
423 |
GCStats* gc_stats() const { return _gc_stats; } |
|
424 |
virtual void update_gc_stats(int current_level, bool full) {} |
|
425 |
||
426 |
// Mark sweep support phase2 |
|
427 |
virtual void prepare_for_compaction(CompactPoint* cp); |
|
428 |
// Mark sweep support phase3 |
|
429 |
virtual void adjust_pointers(); |
|
430 |
// Mark sweep support phase4 |
|
431 |
virtual void compact(); |
|
432 |
virtual void post_compact() {ShouldNotReachHere();} |
|
433 |
||
434 |
// Support for CMS's rescan. In this general form we return a pointer |
|
435 |
// to an abstract object that can be used, based on specific previously |
|
436 |
// decided protocols, to exchange information between generations, |
|
437 |
// information that may be useful for speeding up certain types of |
|
438 |
// garbage collectors. A NULL value indicates to the client that |
|
439 |
// no data recording is expected by the provider. The data-recorder is |
|
440 |
// expected to be GC worker thread-local, with the worker index |
|
441 |
// indicated by "thr_num". |
|
442 |
virtual void* get_data_recorder(int thr_num) { return NULL; } |
|
18994
d32a17b7502c
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
18687
diff
changeset
|
443 |
virtual void sample_eden_chunk() {} |
1 | 444 |
|
445 |
// Some generations may require some cleanup actions before allowing |
|
446 |
// a verification. |
|
447 |
virtual void prepare_for_verify() {}; |
|
448 |
||
449 |
// Accessing "marks". |
|
450 |
||
451 |
// This function gives a generation a chance to note a point between |
|
452 |
// collections. For example, a contiguous generation might note the |
|
453 |
// beginning allocation point post-collection, which might allow some later |
|
454 |
// operations to be optimized. |
|
455 |
virtual void save_marks() {} |
|
456 |
||
457 |
// This function allows generations to initialize any "saved marks". That |
|
458 |
// is, should only be called when the generation is empty. |
|
459 |
virtual void reset_saved_marks() {} |
|
460 |
||
461 |
// This function is "true" iff any no allocations have occurred in the |
|
462 |
// generation since the last call to "save_marks". |
|
463 |
virtual bool no_allocs_since_save_marks() = 0; |
|
464 |
||
465 |
// Apply "cl->apply" to (the addresses of) all reference fields in objects |
|
466 |
// allocated in the current generation since the last call to "save_marks". |
|
467 |
// If more objects are allocated in this generation as a result of applying |
|
468 |
// the closure, iterates over reference fields in those objects as well. |
|
469 |
// Calls "save_marks" at the end of the iteration. |
|
470 |
// General signature... |
|
471 |
virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0; |
|
472 |
// ...and specializations for de-virtualization. (The general |
|
22551 | 473 |
// implementation of the _nv versions call the virtual version. |
1 | 474 |
// Note that the _nv suffix is not really semantically necessary, |
475 |
// but it avoids some not-so-useful warnings on Solaris.) |
|
476 |
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
|
477 |
virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ |
|
478 |
oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \ |
|
479 |
} |
|
480 |
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL) |
|
481 |
||
482 |
#undef Generation_SINCE_SAVE_MARKS_DECL |
|
483 |
||
484 |
// The "requestor" generation is performing some garbage collection |
|
485 |
// action for which it would be useful to have scratch space. If |
|
486 |
// the target is not the requestor, no gc actions will be required |
|
487 |
// of the target. The requestor promises to allocate no more than |
|
488 |
// "max_alloc_words" in the target generation (via promotion say, |
|
489 |
// if the requestor is a young generation and the target is older). |
|
490 |
// If the target generation can provide any scratch space, it adds |
|
491 |
// it to "list", leaving "list" pointing to the head of the |
|
492 |
// augmented list. The default is to offer no space. |
|
493 |
virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, |
|
494 |
size_t max_alloc_words) {} |
|
495 |
||
971
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
496 |
// Give each generation an opportunity to do clean up for any |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
497 |
// contributed scratch. |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
498 |
virtual void reset_scratch() {}; |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
499 |
|
1 | 500 |
// When an older generation has been collected, and perhaps resized, |
501 |
// this method will be invoked on all younger generations (from older to |
|
502 |
// younger), allowing them to resize themselves as appropriate. |
|
503 |
virtual void compute_new_size() = 0; |
|
504 |
||
505 |
// Printing |
|
506 |
virtual const char* name() const = 0; |
|
507 |
virtual const char* short_name() const = 0; |
|
508 |
||
509 |
int level() const { return _level; } |
|
510 |
||
511 |
// Reference Processing accessor |
|
512 |
ReferenceProcessor* const ref_processor() { return _ref_processor; } |
|
513 |
||
514 |
// Iteration. |
|
515 |
||
516 |
// Iterate over all the ref-containing fields of all objects in the |
|
517 |
// generation, calling "cl.do_oop" on each. |
|
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
518 |
virtual void oop_iterate(ExtendedOopClosure* cl); |
1 | 519 |
|
520 |
// Iterate over all objects in the generation, calling "cl.do_object" on |
|
521 |
// each. |
|
522 |
virtual void object_iterate(ObjectClosure* cl); |
|
523 |
||
1893
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
979
diff
changeset
|
524 |
// Iterate over all safe objects in the generation, calling "cl.do_object" on |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
979
diff
changeset
|
525 |
// each. An object is safe if its references point to other objects in |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
979
diff
changeset
|
526 |
// the heap. This defaults to object_iterate() unless overridden. |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
979
diff
changeset
|
527 |
virtual void safe_object_iterate(ObjectClosure* cl); |
c82e388e17c5
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
979
diff
changeset
|
528 |
|
1 | 529 |
// Apply "cl->do_oop" to (the address of) all and only all the ref fields |
530 |
// in the current generation that contain pointers to objects in younger |
|
531 |
// generations. Objects allocated since the last "save_marks" call are |
|
532 |
// excluded. |
|
533 |
virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0; |
|
534 |
||
535 |
// Inform a generation that it longer contains references to objects |
|
536 |
// in any younger generation. [e.g. Because younger gens are empty, |
|
537 |
// clear the card table.] |
|
538 |
virtual void clear_remembered_set() { } |
|
539 |
||
540 |
// Inform a generation that some of its objects have moved. [e.g. The |
|
541 |
// generation's spaces were compacted, invalidating the card table.] |
|
542 |
virtual void invalidate_remembered_set() { } |
|
543 |
||
544 |
// Block abstraction. |
|
545 |
||
546 |
// Returns the address of the start of the "block" that contains the |
|
547 |
// address "addr". We say "blocks" instead of "object" since some heaps |
|
548 |
// may not pack objects densely; a chunk may either be an object or a |
|
549 |
// non-object. |
|
550 |
virtual HeapWord* block_start(const void* addr) const; |
|
551 |
||
552 |
// Requires "addr" to be the start of a chunk, and returns its size. |
|
553 |
// "addr + size" is required to be the start of a new chunk, or the end |
|
554 |
// of the active area of the heap. |
|
555 |
virtual size_t block_size(const HeapWord* addr) const ; |
|
556 |
||
557 |
// Requires "addr" to be the start of a block, and returns "TRUE" iff |
|
558 |
// the block is an object. |
|
559 |
virtual bool block_is_obj(const HeapWord* addr) const; |
|
560 |
||
561 |
||
562 |
// PrintGC, PrintGCDetails support |
|
563 |
void print_heap_change(size_t prev_used) const; |
|
564 |
||
565 |
// PrintHeapAtGC support |
|
566 |
virtual void print() const; |
|
567 |
virtual void print_on(outputStream* st) const; |
|
568 |
||
12379 | 569 |
virtual void verify() = 0; |
1 | 570 |
|
571 |
struct StatRecord { |
|
572 |
int invocations; |
|
573 |
elapsedTimer accumulated_time; |
|
574 |
StatRecord() : |
|
575 |
invocations(0), |
|
576 |
accumulated_time(elapsedTimer()) {} |
|
577 |
}; |
|
578 |
private: |
|
579 |
StatRecord _stat_record; |
|
580 |
public: |
|
581 |
StatRecord* stat_record() { return &_stat_record; } |
|
582 |
||
583 |
virtual void print_summary_info(); |
|
584 |
virtual void print_summary_info_on(outputStream* st); |
|
585 |
||
586 |
// Performance Counter support |
|
587 |
virtual void update_counters() = 0; |
|
588 |
virtual CollectorCounters* counters() { return _gc_counters; } |
|
589 |
}; |
|
590 |
||
591 |
// Class CardGeneration is a generation that is covered by a card table, |
|
592 |
// and uses a card-size block-offset array to implement block_start. |
|
593 |
||
594 |
// class BlockOffsetArray; |
|
595 |
// class BlockOffsetArrayContigSpace; |
|
596 |
class BlockOffsetSharedArray; |
|
597 |
||
598 |
class CardGeneration: public Generation { |
|
599 |
friend class VMStructs; |
|
600 |
protected: |
|
601 |
// This is shared with other generations. |
|
602 |
GenRemSet* _rs; |
|
603 |
// This is local to this generation. |
|
604 |
BlockOffsetSharedArray* _bts; |
|
605 |
||
16681
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
606 |
// current shrinking effect: this damps shrinking when the heap gets empty. |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
607 |
size_t _shrink_factor; |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
608 |
|
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
609 |
size_t _min_heap_delta_bytes; // Minimum amount to expand. |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
610 |
|
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
611 |
// Some statistics from before gc started. |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
612 |
// These are gathered in the gc_prologue (and should_collect) |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
613 |
// to control growing/shrinking policy in spite of promotions. |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
614 |
size_t _capacity_at_prologue; |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
615 |
size_t _used_at_prologue; |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
616 |
|
1 | 617 |
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, |
618 |
GenRemSet* remset); |
|
619 |
||
620 |
public: |
|
621 |
||
979
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
622 |
// Attempt to expand the generation by "bytes". Expand by at a |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
623 |
// minimum "expand_bytes". Return true if some amount (not |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
624 |
// necessarily the full "bytes") was done. |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
625 |
virtual bool expand(size_t bytes, size_t expand_bytes); |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
626 |
|
16681
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
627 |
// Shrink generation with specified size (returns false if unable to shrink) |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
628 |
virtual void shrink(size_t bytes) = 0; |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
629 |
|
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
630 |
virtual void compute_new_size(); |
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
631 |
|
1 | 632 |
virtual void clear_remembered_set(); |
633 |
||
634 |
virtual void invalidate_remembered_set(); |
|
635 |
||
636 |
virtual void prepare_for_verify(); |
|
979
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
637 |
|
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
638 |
// Grow generation with specified size (returns false if unable to grow) |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
639 |
virtual bool grow_by(size_t bytes) = 0; |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
640 |
// Grow generation to reserved size. |
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
641 |
virtual bool grow_to_reserved() = 0; |
1 | 642 |
}; |
643 |
||
644 |
// OneContigSpaceCardGeneration models a heap of old objects contained in a single |
|
645 |
// contiguous space. |
|
646 |
// |
|
647 |
// Garbage collection is performed using mark-compact. |
|
648 |
||
649 |
class OneContigSpaceCardGeneration: public CardGeneration { |
|
650 |
friend class VMStructs; |
|
651 |
// Abstractly, this is a subtype that gets access to protected fields. |
|
652 |
friend class VM_PopulateDumpSharedSpace; |
|
653 |
||
654 |
protected: |
|
655 |
ContiguousSpace* _the_space; // actual space holding objects |
|
656 |
WaterMark _last_gc; // watermark between objects allocated before |
|
657 |
// and after last GC. |
|
658 |
||
659 |
// Grow generation with specified size (returns false if unable to grow) |
|
979
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
660 |
virtual bool grow_by(size_t bytes); |
1 | 661 |
// Grow generation to reserved size. |
979
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
662 |
virtual bool grow_to_reserved(); |
1 | 663 |
// Shrink generation with specified size (returns false if unable to shrink) |
664 |
void shrink_by(size_t bytes); |
|
665 |
||
666 |
// Allocation failure |
|
979
c9479f1e0a94
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
977
diff
changeset
|
667 |
virtual bool expand(size_t bytes, size_t expand_bytes); |
1 | 668 |
void shrink(size_t bytes); |
669 |
||
670 |
// Accessing spaces |
|
671 |
ContiguousSpace* the_space() const { return _the_space; } |
|
672 |
||
673 |
public: |
|
674 |
OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size, |
|
675 |
int level, GenRemSet* remset, |
|
676 |
ContiguousSpace* space) : |
|
677 |
CardGeneration(rs, initial_byte_size, level, remset), |
|
16681
d64161ca3e3c
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
13728
diff
changeset
|
678 |
_the_space(space) |
1 | 679 |
{} |
680 |
||
681 |
inline bool is_in(const void* p) const; |
|
682 |
||
683 |
// Space enquiries |
|
684 |
size_t capacity() const; |
|
685 |
size_t used() const; |
|
686 |
size_t free() const; |
|
687 |
||
688 |
MemRegion used_region() const; |
|
689 |
||
690 |
size_t unsafe_max_alloc_nogc() const; |
|
691 |
size_t contiguous_available() const; |
|
692 |
||
693 |
// Iteration |
|
694 |
void object_iterate(ObjectClosure* blk); |
|
695 |
void space_iterate(SpaceClosure* blk, bool usedOnly = false); |
|
696 |
||
697 |
void younger_refs_iterate(OopsInGenClosure* blk); |
|
698 |
||
699 |
inline CompactibleSpace* first_compaction_space() const; |
|
700 |
||
701 |
virtual inline HeapWord* allocate(size_t word_size, bool is_tlab); |
|
702 |
virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab); |
|
703 |
||
704 |
// Accessing marks |
|
705 |
inline WaterMark top_mark(); |
|
706 |
inline WaterMark bottom_mark(); |
|
707 |
||
708 |
#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
|
709 |
void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); |
|
710 |
OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v) |
|
711 |
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL) |
|
712 |
||
713 |
void save_marks(); |
|
714 |
void reset_saved_marks(); |
|
715 |
bool no_allocs_since_save_marks(); |
|
716 |
||
717 |
inline size_t block_size(const HeapWord* addr) const; |
|
718 |
||
719 |
inline bool block_is_obj(const HeapWord* addr) const; |
|
720 |
||
721 |
virtual void collect(bool full, |
|
722 |
bool clear_all_soft_refs, |
|
723 |
size_t size, |
|
724 |
bool is_tlab); |
|
725 |
HeapWord* expand_and_allocate(size_t size, |
|
726 |
bool is_tlab, |
|
727 |
bool parallel = false); |
|
728 |
||
729 |
virtual void prepare_for_verify(); |
|
730 |
||
731 |
virtual void gc_epilogue(bool full); |
|
732 |
||
971
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
733 |
virtual void record_spaces_top(); |
f0b20be4165d
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
360
diff
changeset
|
734 |
|
12379 | 735 |
virtual void verify(); |
1 | 736 |
virtual void print_on(outputStream* st) const; |
737 |
}; |
|
7397 | 738 |
|
739 |
#endif // SHARE_VM_MEMORY_GENERATION_HPP |