/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENERATION_HPP
#define SHARE_VM_MEMORY_GENERATION_HPP

#include "gc_implementation/shared/collectorCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/virtualspace.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                      - abstract base class
// - DefNewGeneration              - allocation area (copy collected)
//   - ParNewGeneration            - a DefNewGeneration that is collected by
//                                   several threads
// - CardGeneration                - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration         - tenured (old object) space (markSweepCompact)
//       - CompactingPermGenGen    - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

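// As a rough, non-normative guide to how these configurations are selected
// (the flag-to-generation mapping has shifted across releases):
//
//   -XX:+UseSerialGC        -> DefNewGeneration + TenuredGeneration
//   -XX:+UseParNewGC        -> ParNewGeneration + TenuredGeneration
//   -XX:+UseConcMarkSweepGC -> ParNewGeneration (or DefNewGeneration)
//                              + ConcurrentMarkSweepGeneration
//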
class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
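
// For example (a sketch, assuming a 64-bit VM where a pointer and a size_t
// each occupy one HeapWord): a ScratchBlock overlaid on a 100-word free run
// has num_words == 100; the first two words hold "next" and "num_words",
// leaving 98 words usable as scratch_space.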

class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
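
  // With LogOfGenGrain = 16, generation boundaries are 64K-aligned
  // (GenGrain = 1 << 16 = 65536 bytes); the extra ARM bit doubles this
  // to 128K (1 << 17).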

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a sub
  // class of DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
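
  // A worked example of the overflow guard above, assuming an LP64 build
  // (BitsPerSize_t == 64, LogHeapWordSize == 3): overflow_limit is 2^61
  // words, so any accepted word_size converts to a byte count
  // (word_size << LogHeapWordSize) without overflowing a size_t.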

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.  The
  // caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
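
  // A minimal sketch of the lock-free fast path that inlined allocation
  // code is expected to follow against these two addresses (illustrative
  // only; the real fast paths are emitted by the interpreter and compilers):
  //
  //   HeapWord* old_top = *top_addr();
  //   if (old_top + word_size <= *end_addr() &&
  //       Atomic::cmpxchg_ptr(old_top + word_size, top_addr(), old_top)
  //         == old_top) {
  //     // success: [old_top, old_top + word_size) belongs to the caller
  //   } else {
  //     // fall back to the slow path, e.g. par_allocate()
  //   }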

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
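
  // A caller-side sketch (illustrative only; the real callers are the
  // parallel young-generation copying collectors):
  //
  //   markOop m = obj->mark();                 // read before any forwarding
  //   oop copy = old_gen->par_promote(thread_num, obj, m, obj->size());
  //   if (copy == NULL) {
  //     // no space was available: take the promotion-failure path
  //   }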

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on
  // this generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        // jlong arguments need a 64-bit-safe format specifier; a plain
        // "%d" would truncate them on LP64 platforms.
        warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now)  {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() {ShouldNotReachHere();}
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() {ShouldNotReachHere();}

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, the function should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions call the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
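
  // A sketch of the intended calling protocol (illustrative only; in
  // practice the heap gathers scratch from each of its generations):
  //
  //   ScratchBlock* list = NULL;
  //   for (Generation* g = youngest; g != NULL; g = g->next_gen()) {
  //     g->contribute_scratch(list, requestor, max_alloc_words);
  //   }
  //   // "list" now heads the chain of blocks the requestor may use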

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by a minimum of
  // "expand_bytes".  Return true if some amount (not necessarily the full
  // "bytes") was expanded.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t            _min_heap_delta_bytes;  // Minimum amount to expand.
  ContiguousSpace*  _the_space;             // actual space holding objects
  WaterMark         _last_gc;               // watermark between objects allocated before
                                            // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation with specified size (returns false if unable to shrink)
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};

#endif // SHARE_VM_MEMORY_GENERATION_HPP