/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;


// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
  OopClosure* cl;
  MemRegion   mr;
 public:
  void do_oop(oop* p) {
    if (mr.contains(p)) {
      cl->do_oop(p);
    }
  }
  SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
};
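
// Illustrative usage (a sketch added for exposition, not part of the
// original interface): restrict an existing closure to the oop locations
// inside one region. "underlying", "dirty" and "sp" are hypothetical names.
//
//   OopClosure* underlying = ...;       // some existing closure
//   MemRegion dirty = ...;              // e.g., one dirty card's span
//   SpaceMemRegionOopsIterClosure filtered(underlying, dirty);
//   sp->oop_iterate(&filtered);         // only oops inside "dirty" reach
//                                       // underlying->do_oop()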


// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  HeapWord* saved_mark_word() const     { return _saved_mark_word; }
  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization
  virtual void initialize(MemRegion mr, bool clear_space);
  virtual void clear();

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing.  We also call this when expanding
  // a space to satisfy an allocation request.  See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the given address
  // as part of an allocated object.  For certain kinds of spaces,
  // this might be a potentially expensive operation.  To prevent
  // performance problems on account of its inadvertent use in
  // product jvm's, we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space.  Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each.  Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure.  Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate.  ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start(const void* p) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;
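
  // Illustrative sketch (added for exposition): the block protocol above
  // supports a linear walk over a space; "sp" and "limit" are hypothetical,
  // with "limit" assumed to be a block boundary.
  //
  //   HeapWord* cur = sp->bottom();
  //   while (cur < limit) {
  //     if (sp->block_is_obj(cur)) {
  //       // oop(cur) is an object occupying sp->block_size(cur) words
  //     }
  //     cur += sp->block_size(cur);  // "addr + size" starts the next chunk
  //   }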

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
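
  // Illustrative caller pattern (a sketch, not a contract of this
  // interface): treat NULL as "space full" and fall back to a slow path.
  //
  //   HeapWord* obj = sp->par_allocate(word_size);   // "sp" hypothetical
  //   if (obj == NULL) {
  //     // slow path: e.g., expand the space or trigger a collection
  //   }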

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;       // If non-NULL, process only non-NULL oops
                             // pointing below boundary.
  HeapWord* _min_done;       // ObjHeadPreciseArray precision requires
                             // a downwards traversal; this is the
                             // lowest location already done (or,
                             // alternatively, the lowest address that
                             // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL;)
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
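
// Illustrative usage (a sketch added for exposition): a card-table scan can
// obtain a closure from Space::new_dcto_cl and apply it to each dirty span;
// "sp", "cl" and "dirty_mr" are hypothetical.
//
//   DirtyCardToOopClosure* dcto =
//     sp->new_dcto_cl(cl, CardTableModRefBS::ObjHeadPreciseArray);
//   dcto->do_MemRegion(dirty_mr);    // process one dirty region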

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual int allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store it in the mark \
   * Used by universe::mark_sweep_phase2()                                   \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this       \
   * space, so this is a good time to initialize this:                      \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so    \
   * we don't start compacting before there is a significant gain to be     \
   * made.  Occasionally, we want to ensure a full compaction, which is     \
   * determined by the MarkSweepAlwaysCompactCount parameter.               \
   */                                                                        \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    int ratio = allowed_dead_ratio();                                        \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord* end_of_live = q;   /* One byte beyond the last byte of the last \
                                  live object. */                           \
  HeapWord* first_dead = end();/* The first dead object. */                  \
  LiveRange* liveRange = NULL; /* The current live range, recorded in the   \
                                  first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that       \
       * we don't have to compact quite as often.                           \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                               \
       * liveRange->start() is overlaid on the mark word.                   \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
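
// Illustrative expansion site (a sketch; the actual expansions live in the
// corresponding .cpp files): a space's prepare_for_compaction typically
// instantiates this macro with its own block primitives, e.g.
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }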

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
  /* adjust all the interior pointers to point at the new locations of      \
   * objects.  Used by MarkSweep::mark_sweep_phase3() */                     \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
                                                                             \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    /* we have a chunk of the space which hasn't moved and we've            \
     * reinitialized the mark word during the previous pass, so we can't    \
     * use is_gc_marked for the traversal. */                               \
    HeapWord* end = _first_dead;                                             \
                                                                             \
    while (q < end) {                                                        \
      /* I originally tried to conjoin "block_start(q) == q" to the         \
       * assertion below, but that doesn't work, because you can't          \
       * accurately traverse previous objects to get to the current one     \
       * after their pointers (including pointers into permGen) have been   \
       * updated, until the actual compaction is done.  dld, 4/00 */        \
      assert(block_is_obj(q),                                                \
             "should be at block boundaries, and should be looking at objs");\
                                                                             \
      debug_only(MarkSweep::track_interior_pointers(oop(q)));                \
                                                                             \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
                                                                             \
      debug_only(MarkSweep::check_interior_pointers());                      \
                                                                             \
      debug_only(MarkSweep::validate_live_oop(oop(q), size));                \
                                                                             \
      q += size;                                                             \
    }                                                                        \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ This is funky.  Using this to read the previously written      \
       * LiveRange.  See also use below. */                                 \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  debug_only(HeapWord* prev_q = NULL);                                       \
  while (q < t) {                                                            \
    /* prefetch beyond q */                                                  \
    Prefetch::write(q, interval);                                            \
    if (oop(q)->is_gc_marked()) {                                            \
      /* q is alive */                                                       \
      debug_only(MarkSweep::track_interior_pointers(oop(q)));                \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
      debug_only(MarkSweep::check_interior_pointers());                      \
      debug_only(MarkSweep::validate_live_oop(oop(q), size));                \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    } else {                                                                 \
      /* q is not a live object, so its mark should point at the next       \
       * live object */                                                     \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
}
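
// Illustrative expansion site (a sketch, assuming the usual pattern in the
// .cpp files): phase 3 instantiates this macro with the space's size
// adjustment, e.g.
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;   // nothing to do
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }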

#define SCAN_AND_COMPACT(obj_size) {                                         \
  /* Copy all live objects to their new location                            \
   * Used by MarkSweep::mark_sweep_phase4() */                               \
                                                                             \
  HeapWord*       q = bottom();                                              \
  HeapWord* const t = _end_of_live;                                          \
  debug_only(HeapWord* prev_q = NULL);                                       \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    debug_only(                                                              \
    /* we have a chunk of the space which hasn't moved and we've            \
     * reinitialized the mark word during the previous pass, so we can't    \
     * use is_gc_marked for the traversal. */                               \
    HeapWord* const end = _first_dead;                                       \
                                                                             \
    while (q < end) {                                                        \
      size_t size = obj_size(q);                                             \
      assert(!oop(q)->is_gc_marked(),                                        \
             "should be unmarked (special dense prefix handling)");          \
      debug_only(MarkSweep::live_oop_moved_to(q, size, q));                  \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
    ) /* debug_only */                                                       \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ Funky */                                                        \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
  while (q < t) {                                                            \
    if (!oop(q)->is_gc_marked()) {                                           \
      /* mark is pointer to next marked oop */                               \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    } else {                                                                 \
      /* prefetch beyond q */                                                \
      Prefetch::read(q, scan_interval);                                      \
                                                                             \
      /* size and destination */                                             \
      size_t size = obj_size(q);                                             \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                             \
      /* prefetch beyond compaction_top */                                   \
      Prefetch::write(compaction_top, copy_interval);                        \
                                                                             \
      /* copy object and reinit its mark */                                  \
      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));     \
      assert(q != compaction_top, "everything in this pass should be moving");\
      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
      oop(compaction_top)->init_mark();                                      \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                             \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
  }                                                                          \
                                                                             \
  /* Reset space after compaction is complete */                             \
  reset_after_compaction();                                                  \
  /* We do this clear, below, since it has overloaded meanings for some */   \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */   \
  /* compacted into will have had their offset table thresholds updated */   \
  /* continuously, but those that weren't need to have their thresholds */   \
  /* re-initialized.  Also mangles unused area for debugging.           */   \
  if (is_empty()) {                                                          \
    clear();                                                                 \
  } else {                                                                   \
    if (ZapUnusedHeapArea) mangle_unused_area();                             \
  }                                                                          \
}
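
// Illustrative expansion site (a sketch, same caveat as above): phase 4
// instantiates this macro with the space's object-size function, e.g.
//
//   void CompactibleSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }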

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  virtual void initialize(MemRegion mr, bool clear_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top();    }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  virtual void clear();

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  void mangle_unused_area();
  void mangle_region(MemRegion mr);

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }
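
  // Note: since [bottom(), top()) is the used part and [top(), end()) the
  // free part of a contiguous space, used() + free() == capacity() here.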

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; }

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear();

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
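
// Illustrative sketch (added for exposition): _soft_end makes the fast path
// fail early so a slow path can act. A hypothetical outline:
//
//   HeapWord* obj = eden->allocate(word_size);   // NULL once top() reaches
//   if (obj == NULL) {                           //   the current soft end
//     // slow path: e.g., raise the soft limit toward end() and retry,
//     // or report failure to the caller
//   }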

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear();

  inline HeapWord* block_start(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  int allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};