author       coleenp
date         Sun, 13 Apr 2008 17:43:42 -0400
changeset    360:21d113ecbf6a
parent       1:489c9b5090e2
children     971:f0b20be4165d 670:ddf3e9583f2f 1374:4c24294029a9
permissions  -rw-r--r--
/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public OopClosure {
 private:
  OopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
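
// A minimal usage sketch (not part of this header; "MyOopClosure" is a
// placeholder for any concrete OopClosure subclass): wrap a closure so it
// only fires for oop locations that lie inside a given region.
//
//   MyOopClosure cl;
//   MemRegion mr(space->bottom(), space->top());
//   SpaceMemRegionOopsIterClosure filtered(&cl, mr);
//   space->oop_iterate(&filtered);   // cl.do_oop runs only for p in mr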

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation, size computation, and GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end; }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  HeapWord* saved_mark_word() const     { return _saved_mark_word; }
  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization
  virtual void initialize(MemRegion mr, bool clear_space);
  virtual void clear();

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
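
  // For example, on a platform where sizeof(double) is 8, is_aligned(p)
  // tests whether the low three bits of p are zero:
  //   is_aligned((void*)0x1000)  =>  true
  //   is_aligned((void*)0x1004)  =>  false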

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start(const void* p) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;
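
  // A minimal sketch (not part of this interface) of how a caller might
  // walk a space with the block primitives above; assumes exclusive access,
  // a caller-supplied "limit", and a hypothetical per-object action "visit":
  //
  //   HeapWord* q = sp->bottom();
  //   while (q < limit) {
  //     size_t size = sp->block_size(q);          // size in HeapWords
  //     if (sp->block_is_obj(q) && sp->obj_is_alive(q)) {
  //       visit(oop(q));                          // "visit" is a placeholder
  //     }
  //     q += size;
  //   }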

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
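
  // A sketch of the lock-free pattern that ContiguousSpace-style subclasses
  // typically use for par_allocate (illustration only; the actual
  // implementations live in the concrete subclasses):
  //
  //   do {
  //     HeapWord* obj = top();
  //     if (pointer_delta(end(), obj) < word_size) return NULL;  // full
  //     HeapWord* new_top = obj + word_size;
  //     // try to install new_top; retry if another thread won the race
  //     HeapWord* result =
  //       (HeapWord*) Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
  //     if (result == obj) return obj;
  //   } while (true);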

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;  // If non-NULL, process only non-NULL oops
                        // pointing below boundary.
  HeapWord* _min_done;  // ObjHeadPreciseArray precision requires
                        // a downwards traversal; this is the
                        // lowest location already done (or,
                        // alternatively, the lowest address that
                        // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL;)
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
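
// A sketch (not part of this header) of how a remembered-set scan might
// drive a space's dcto closure; "dirty_mr", "cl", "precision", and
// "boundary" are hypothetical placeholders supplied by the caller:
//
//   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision, boundary);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies cl to oops under dirty cards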

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual int allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  /* Compute the new addresses for the live objects and store it in the mark \
   * Used by universe::mark_sweep_phase2() \
   */ \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
 \
  /* We're sure to be here before any objects are compacted into this \
   * space, so this is a good time to initialize this: \
   */ \
  set_compaction_top(bottom()); \
 \
  if (cp->space == NULL) { \
    assert(cp->gen != NULL, "need a generation"); \
    assert(cp->threshold == NULL, "just checking"); \
    assert(cp->gen->first_compaction_space() == this, "just checking"); \
    cp->space = cp->gen->first_compaction_space(); \
    compact_top = cp->space->bottom(); \
    cp->space->set_compaction_top(compact_top); \
    cp->threshold = cp->space->initialize_threshold(); \
  } else { \
    compact_top = cp->space->compaction_top(); \
  } \
 \
  /* We allow some amount of garbage towards the bottom of the space, so \
   * we don't start compacting before there is a significant gain to be made. \
   * Occasionally, we want to ensure a full compaction, which is determined \
   * by the MarkSweepAlwaysCompactCount parameter. \
   */ \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
 \
  size_t allowed_deadspace = 0; \
  if (skip_dead) { \
    int ratio = allowed_dead_ratio(); \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
  } \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = scan_limit(); \
 \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */ \
  HeapWord*  first_dead = end();/* The first dead object. */ \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the \
                                   first header of preceding free area. */ \
  _first_dead = first_dead; \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  while (q < t) { \
    assert(!block_is_obj(q) || \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
           oop(q)->mark()->has_bias_pattern(), \
           "these are the only valid states during a mark sweep"); \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
      /* prefetch beyond q */ \
      Prefetch::write(q, interval); \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */ \
      size_t size = block_size(q); \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
      q += size; \
      end_of_live = q; \
    } else { \
      /* run over all the contiguous dead objects */ \
      HeapWord* end = q; \
      do { \
        /* prefetch beyond end */ \
        Prefetch::write(end, interval); \
        end += block_size(end); \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); \
 \
      /* see if we might want to pretend this object is alive so that \
       * we don't have to compact quite as often. \
       */ \
      if (allowed_deadspace > 0 && q == compact_top) { \
        size_t sz = pointer_delta(end, q); \
        if (insert_deadspace(allowed_deadspace, q, sz)) { \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
          q = end; \
          end_of_live = end; \
          continue; \
        } \
      } \
 \
      /* otherwise, it really is a free region. */ \
 \
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) { \
        liveRange->set_end(q); \
      } \
 \
      /* record the current LiveRange object. \
       * liveRange->start() is overlaid on the mark word. \
       */ \
      liveRange = (LiveRange*)q; \
      liveRange->set_start(end); \
      liveRange->set_end(end); \
 \
      /* see if this is the first dead region. */ \
      if (q < first_dead) { \
        first_dead = q; \
      } \
 \
      /* move on to the next object */ \
      q = end; \
    } \
  } \
 \
  assert(q == t, "just checking"); \
  if (liveRange != NULL) { \
    liveRange->set_end(q); \
  } \
  _end_of_live = end_of_live; \
  if (end_of_live < first_dead) { \
    first_dead = end_of_live; \
  } \
  _first_dead = first_dead; \
 \
  /* save the compaction_top of the compaction space. */ \
  cp->space->set_compaction_top(compact_top); \
}
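
// A sketch of how a subclass typically instantiates this macro (this
// mirrors ContiguousSpace::prepare_for_compaction in space.cpp; shown
// here only as an illustration):
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_obj, block_size);
//   }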

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
  /* adjust all the interior pointers to point at the new locations of objects \
   * Used by MarkSweep::mark_sweep_phase3() */ \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
 \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. */ \
    HeapWord* end = _first_dead; \
 \
    while (q < end) { \
      /* I originally tried to conjoin "block_start(q) == q" to the \
       * assertion below, but that doesn't work, because you can't \
       * accurately traverse previous objects to get to the current one \
       * after their pointers (including pointers into permGen) have been \
       * updated, until the actual compaction is done.  dld, 4/00 */ \
      assert(block_is_obj(q), \
             "should be at block boundaries, and should be looking at objs"); \
 \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
 \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
 \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
 \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
 \
      q += size; \
    } \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ This is funky.  Using this to read the previously written \
       * LiveRange.  See also use below. */ \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  debug_only(HeapWord* prev_q = NULL); \
  while (q < t) { \
    /* prefetch beyond q */ \
    Prefetch::write(q, interval); \
    if (oop(q)->is_gc_marked()) { \
      /* q is alive */ \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
      debug_only(prev_q = q); \
      q += size; \
    } else { \
      /* q is not a live object, so its mark should point at the next \
       * live object */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } \
  } \
 \
  assert(q == t, "just checking"); \
}
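
// A sketch of a typical instantiation (illustration only; mirrors
// CompactibleSpace::adjust_pointers in space.cpp):
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;   // nothing to adjust
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }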

#define SCAN_AND_COMPACT(obj_size) { \
  /* Copy all live objects to their new location \
   * Used by MarkSweep::mark_sweep_phase4() */ \
 \
  HeapWord*       q = bottom(); \
  HeapWord* const t = _end_of_live; \
  debug_only(HeapWord* prev_q = NULL); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    debug_only( \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */ \
    HeapWord* const end = _first_dead; \
 \
    while (q < end) { \
      size_t size = obj_size(q); \
      assert(!oop(q)->is_gc_marked(), \
             "should be unmarked (special dense prefix handling)"); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
      debug_only(prev_q = q); \
      q += size; \
    } \
    )  /* debug_only */ \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ Funky */ \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx scan_interval = PrefetchScanIntervalInBytes; \
  const intx copy_interval = PrefetchCopyIntervalInBytes; \
  while (q < t) { \
    if (!oop(q)->is_gc_marked()) { \
      /* mark is pointer to next marked oop */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } else { \
      /* prefetch beyond q */ \
      Prefetch::read(q, scan_interval); \
 \
      /* size and destination */ \
      size_t size = obj_size(q); \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
 \
      /* prefetch beyond compaction_top */ \
      Prefetch::write(compaction_top, copy_interval); \
 \
      /* copy object and reinit its mark */ \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
                                                            compaction_top)); \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size); \
      oop(compaction_top)->init_mark(); \
      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
 \
      debug_only(prev_q = q); \
      q += size; \
    } \
  } \
 \
  /* Reset space after compaction is complete */ \
  reset_after_compaction(); \
  /* We do this clear, below, since it has overloaded meanings for some */ \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */ \
  /* compacted into will have had their offset table thresholds updated */ \
  /* continuously, but those that weren't need to have their thresholds */ \
  /* re-initialized.  Also mangles unused area for debugging.           */ \
  if (is_empty()) { \
    clear(); \
  } else { \
    if (ZapUnusedHeapArea) mangle_unused_area(); \
  } \
}
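
// A sketch of a typical instantiation (illustration only; mirrors
// CompactibleSpace::compact in space.cpp):
//
//   void CompactibleSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }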

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top(); }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  virtual void clear();

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  void mangle_unused_area();
  void mangle_region(MemRegion mr);

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; }

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear();

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
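
// A sketch of the soft-limit allocation pattern described above
// (illustration only; "take_slow_path_actions" and "new_soft_limit" are
// hypothetical placeholders for logic that lives outside this class):
//
//   HeapWord* result = allocate_impl(word_size, soft_end());
//   if (result == NULL && soft_end() < end()) {
//     take_slow_path_actions();       // e.g. sample allocation counters
//     set_soft_end(new_soft_limit);   // raise the soft limit toward end()
//     result = allocate_impl(word_size, soft_end());
//   }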

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear();

  inline HeapWord* block_start(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};
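
// Illustration (not part of this class body): with the block offset array,
// a block_start query need not scan forward from bottom(); conceptually the
// inline definition delegates to the offset table, along the lines of:
//
//   inline HeapWord* OffsetTableContigSpace::block_start(const void* p) const {
//     return _offsets.block_start(p);   // sketch of the delegation
//   }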


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};