/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                      - abstract base class
// - DefNewGeneration              - allocation area (copy collected)
//   - ParNewGeneration            - a DefNewGeneration that is collected by
//                                   several threads
// - CardGeneration                - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration         - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen      - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                       (Detlefs-Printezis refinement of
//                                        Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
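
// Illustrative sketch, not part of the VM proper: carving a free chunk into a
// ScratchBlock and pushing it onto a scratch list, the shape of work a
// contribute_scratch() implementation performs.  The helper name and its
// arguments are hypothetical; it assumes "free_start" addresses at least
// "free_words" words of free space, with free_words >= 2 to cover the header.
static inline void push_scratch_sketch(ScratchBlock*& list,
                                       HeapWord* free_start,
                                       size_t free_words) {
  ScratchBlock* blk = (ScratchBlock*)free_start;  // the header lives in the chunk itself
  blk->num_words = free_words;  // total words, including the two header words
  blk->next = list;             // push onto the head of the list
  list = blk;
}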
|
class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:

  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain; with LogOfGenGrain = 16, that is 64K alignment.
    LogOfGenGrain = 16,
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount can
  // be attempted safely (without a vm failure).
  // Promotion of the full amount is not guaranteed but
  // can be attempted.
  // "younger_handles_promotion_failure" is true if the younger
  // generation handles a promotion failure.
  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
                                         bool younger_handles_promotion_failure) const;

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a sub
  // class of DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    // Reject sizes whose byte count would overflow a size_t.
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.
  // The caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
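
  // Illustrative sketch, not a VM interface: the lock-free allocation style
  // described above, bumping *top_addr() with a CAS.  "gen" and the helper
  // name are hypothetical; it assumes Atomic::cmpxchg_ptr (atomic.hpp) is
  // available.
  //
  //   HeapWord* cas_allocate_sketch(Generation* gen, size_t word_size) {
  //     for (;;) {
  //       HeapWord* top     = *gen->top_addr();
  //       HeapWord* new_top = top + word_size;
  //       if (new_top > *gen->end_addr()) return NULL;  // no room: slow path
  //       // Install new_top; retry if another thread raced us past "top".
  //       if ((HeapWord*)Atomic::cmpxchg_ptr(new_top, gen->top_addr(), top) == top) {
  //         return top;  // the new object starts at the old top
  //       }
  //     }
  //   }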
|
  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        // jlong needs a 64-bit format specifier; "%d" would truncate it.
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now)  {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)    \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);             \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
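
  // For example, assuming (ScanClosure, _nv) is among the specialized
  // (closure type, suffix) pairs, the macro above declares:
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }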
|
  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
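
  // Illustrative sketch, not a VM interface: how a requestor might walk the
  // scratch blocks it was given (the two header words are not usable space).
  //
  //   for (ScratchBlock* b = list; b != NULL; b = b->next) {
  //     HeapWord* usable    = b->scratch_space;
  //     size_t usable_words = b->num_words - 2;
  //     // ... use [usable, usable + usable_words) ...
  //   }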
|
  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
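
  // Illustrative sketch, not a VM interface: visiting every block in a
  // generation "gen" (a hypothetical pointer) with the abstraction above.
  //
  //   HeapWord* p = gen->used_region().start();
  //   while (p < gen->used_region().end()) {
  //     if (gen->block_is_obj(p)) {
  //       // p is the start of an object of gen->block_size(p) words
  //     }
  //     p += gen->block_size(p);  // advance to the next block
  //   }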
|

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by a minimum of
  // "expand_bytes" if at all.  Return true if some amount (not necessarily
  // the full "bytes") was done.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  ContiguousSpace* _the_space;    // actual space holding objects
  WaterMark _last_gc;             // watermark between objects allocated before
                                  // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation by the specified number of bytes.
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};