/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};


// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// well, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
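//
// As an illustrative sketch only (not the body from the corresponding
// .cpp file, and assuming a get_gc_time_stamp() accessor on
// G1CollectedHeap), the read side described above could look like:
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp())
//       return top();                              // stamp is stale
//     else
//       return ContiguousSpace::saved_mark_word(); // stamp matches; valid
//   }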

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff the region is on the unclean list, waiting to be zero filled.
  bool _is_on_unclean_list;

  // True iff the region is on the free list, ready for allocation.
  bool _is_on_free_list;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as linked lists through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See "sort_index" method. -1 means is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivors
  };

  YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  jint _zfs;             // A member of ZeroFillState. Protected by ZF_lock.
  Thread* _zero_filler;  // If _zfs is ZeroFilling, the thread that (last)
                         // made it so.

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);
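
  // For orientation, the fields above relate roughly as follows (a
  // sketch of the intended invariants, not the setup_heap_region_size()
  // body; card_shift is assumed from CardTableModRefBS):
  //
  //   GrainBytes     == 1 << LogOfHRGrainBytes;
  //   GrainWords     == GrainBytes / HeapWordSize;  // == 1 << LogOfHRGrainWords
  //   CardsPerRegion == GrainBytes >> CardTableModRefBS::card_shift;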

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  // Concurrent refinement requires contiguous heap regions (in which TLABs
  // might be allocated) to be zero-filled. Each region therefore has a
  // zero-fill-state.
  enum ZeroFillState {
    NotZeroFilled,
    ZeroFilling,
    ZeroFilled,
    Allocated
  };
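
  // The transitions one would expect between these states, read off the
  // setters declared further down (a reading aid, not an exhaustive
  // specification):
  //
  //   NotZeroFilled --> ZeroFilling    (set_zero_fill_in_progress)
  //   ZeroFilling   --> ZeroFilled     (set_zero_fill_complete)
  //   ZeroFilled    --> Allocated      (set_zero_fill_allocated)
  //   <any state>   --> NotZeroFilled  (set_zero_fill_needed / reset_zero_fill)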

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes() { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
      * HeapWordSize
      + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Causes the current region to represent a humongous object spanning "n"
  // regions.
  virtual void set_startsHumongous();

  // The regions that continue a humongous sequence should be added using
  // this method, in increasing address order.
  void set_continuesHumongous(HeapRegion* start);

  void add_continuingHumongousRegion(HeapRegion* cont);

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  bool is_on_free_list() {
    return _is_on_free_list;
  }

  void set_on_free_list(bool b) {
    _is_on_free_list = b;
  }

  HeapRegion* next_from_free_list() {
    assert(is_on_free_list(),
           "Should only invoke on free space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_free_list(),
           "Malformed Free List.");
    return _next_in_special_set;
  }

  void set_next_on_free_list(HeapRegion* r) {
    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
    _next_in_special_set = r;
  }

  bool is_on_unclean_list() {
    return _is_on_unclean_list;
  }

  void set_on_unclean_list(bool b);

  HeapRegion* next_from_unclean_list() {
    assert(is_on_unclean_list(),
           "Should only invoke on unclean space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_unclean_list(),
           "Malformed unclean List.");
    return _next_in_special_set;
  }

  void set_next_on_unclean_list(HeapRegion* r);

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Ensure that "this" is zero-filled.
  void ensure_zero_filled();
  // This one requires that the calling thread holds ZF_mon.
  void ensure_zero_filled_locked();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_mark".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.) The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);
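
  // A hypothetical call site, for illustration only ("oop_cl" and
  // "dirty_mr" are made-up names): build a closure that filters out
  // pointers staying within this region, then drive it over a dirty
  // card range.
  //
  //   DirtyCardToOopClosure* dcto =
  //     this->new_dcto_closure(oop_cl, CardTableModRefBS::Precise,
  //                            HeapRegionDCTOC::OutOfRegionFilterKind);
  //   dcto->do_MemRegion(dirty_mr);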
539 |
#if WHASSUP |
|
540 |
DirtyCardToOopClosure* |
|
541 |
new_dcto_closure(OopClosure* cl, |
|
542 |
CardTableModRefBS::PrecisionStyle precision, |
|
543 |
HeapWord* boundary) { |
|
544 |
assert(boundary == NULL, "This arg doesn't make sense here."); |
|
545 |
DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk); |
|
546 |
_next_fk = HeapRegionDCTOC::NoFilterKind; |
|
547 |
return res; |
|
548 |
} |
|
549 |
#endif |
|
550 |
||
551 |
// |
|
552 |
// Note the start or end of marking. This tells the heap region |
|
553 |
// that the collector is about to start or has finished (concurrently) |
|
554 |
// marking the heap. |
|
555 |
// |
|
556 |
||
557 |
// Note the start of a marking phase. Record the |
|
558 |
// start of the unmarked area of the region here. |
|
559 |
void note_start_of_marking(bool during_initial_mark) { |
|
560 |
init_top_at_conc_mark_count(); |
|
561 |
_next_marked_bytes = 0; |
|
562 |
if (during_initial_mark && is_young() && !is_survivor()) |
|
563 |
_next_top_at_mark_start = bottom(); |
|
564 |
else |
|
565 |
_next_top_at_mark_start = top(); |
|
566 |
} |
|
567 |
||
568 |
// Note the end of a marking phase. Install the start of |
|
569 |
// the unmarked area that was captured at start of marking. |
|
570 |
void note_end_of_marking() { |
|
571 |
_prev_top_at_mark_start = _next_top_at_mark_start; |
|
572 |
_prev_marked_bytes = _next_marked_bytes; |
|
573 |
_next_marked_bytes = 0; |
|
574 |
||
575 |
guarantee(_prev_marked_bytes <= |
|
576 |
(size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize, |
|
577 |
"invariant"); |
|
578 |
} |
|
579 |
||
580 |
// After an evacuation, we need to update _next_top_at_mark_start |
|
581 |
// to be the current top. Note this is only valid if we have only |
|
582 |
// ever evacuated into this region. If we evacuate, allocate, and |
|
583 |
// then evacuate we are in deep doodoo. |
|
584 |
void note_end_of_copying() { |
|
4024
b90cfcea7031
6847956: G1: crash in oopDesc*G1ParCopyHelper::copy_to_survivor_space(oopDesc*)
tonyp
parents:
4023
diff
changeset
|
585 |
assert(top() >= _next_top_at_mark_start, "Increase only"); |
b90cfcea7031
6847956: G1: crash in oopDesc*G1ParCopyHelper::copy_to_survivor_space(oopDesc*)
tonyp
parents:
4023
diff
changeset
|
586 |
_next_top_at_mark_start = top(); |
1374 | 587 |
} |
588 |
||
589 |
// Returns "false" iff no object in the region was allocated when the |
|
590 |
// last mark phase ended. |
|
591 |
bool is_marked() { return _prev_top_at_mark_start != bottom(); } |
|
592 |
||
593 |
// If "is_marked()" is true, then this is the index of the region in |
|
594 |
// an array constructed at the end of marking of the regions in a |
|
595 |
// "desirability" order. |
|
596 |
int sort_index() { |
|
597 |
return _sort_index; |
|
598 |
} |
|
599 |
void set_sort_index(int i) { |
|
600 |
_sort_index = i; |
|
601 |
} |
|
602 |
||
603 |
void init_top_at_conc_mark_count() { |
|
604 |
_top_at_conc_mark_count = bottom(); |
|
605 |
} |
|
606 |
||
607 |
void set_top_at_conc_mark_count(HeapWord *cur) { |
|
608 |
assert(bottom() <= cur && cur <= end(), "Sanity."); |
|
609 |
_top_at_conc_mark_count = cur; |
|
610 |
} |
|
611 |
||
612 |
HeapWord* top_at_conc_mark_count() { |
|
613 |
return _top_at_conc_mark_count; |
|
614 |
} |
|
615 |
||
616 |
void reset_during_compaction() { |
|
617 |
guarantee( isHumongous() && startsHumongous(), |
|
618 |
"should only be called for humongous regions"); |
|
619 |
||
620 |
zero_marked_bytes(); |
|
621 |
init_top_at_mark_start(); |
|
622 |
} |
|
623 |
||
624 |
// <PREDICTION> |
|
625 |
void calc_gc_efficiency(void); |
|
626 |
double gc_efficiency() { return _gc_efficiency;} |
|
627 |
// </PREDICTION> |
|
628 |
||
629 |
bool is_young() const { return _young_type != NotYoung; } |
|
630 |
bool is_survivor() const { return _young_type == Survivor; } |
|
631 |
||
632 |
int young_index_in_cset() const { return _young_index_in_cset; } |
|
633 |
void set_young_index_in_cset(int index) { |
|
634 |
assert( (index == -1) || is_young(), "pre-condition" ); |
|
635 |
_young_index_in_cset = index; |
|
636 |
} |
|
637 |
||
638 |
int age_in_surv_rate_group() { |
|
639 |
assert( _surv_rate_group != NULL, "pre-condition" ); |
|
640 |
assert( _age_index > -1, "pre-condition" ); |
|
641 |
return _surv_rate_group->age_in_group(_age_index); |
|
642 |
} |
|
643 |
||
644 |
void record_surv_words_in_group(size_t words_survived) { |
|
645 |
assert( _surv_rate_group != NULL, "pre-condition" ); |
|
646 |
assert( _age_index > -1, "pre-condition" ); |
|
647 |
int age_in_group = age_in_surv_rate_group(); |
|
648 |
_surv_rate_group->record_surviving_words(age_in_group, words_survived); |
|
649 |
} |
|
650 |
||
651 |
int age_in_surv_rate_group_cond() { |
|
652 |
if (_surv_rate_group != NULL) |
|
653 |
return age_in_surv_rate_group(); |
|
654 |
else |
|
655 |
return -1; |
|
656 |
} |
|
657 |
||
658 |
SurvRateGroup* surv_rate_group() { |
|
659 |
return _surv_rate_group; |
|
660 |
} |
|
661 |
||
662 |
void install_surv_rate_group(SurvRateGroup* surv_rate_group) { |
|
663 |
assert( surv_rate_group != NULL, "pre-condition" ); |
|
664 |
assert( _surv_rate_group == NULL, "pre-condition" ); |
|
665 |
assert( is_young(), "pre-condition" ); |
|
666 |
||
667 |
_surv_rate_group = surv_rate_group; |
|
668 |
_age_index = surv_rate_group->next_age_index(); |
|
669 |
} |
|
670 |
||
671 |
void uninstall_surv_rate_group() { |
|
672 |
if (_surv_rate_group != NULL) { |
|
673 |
assert( _age_index > -1, "pre-condition" ); |
|
674 |
assert( is_young(), "pre-condition" ); |
|
675 |
||
676 |
_surv_rate_group = NULL; |
|
677 |
_age_index = -1; |
|
678 |
} else { |
|
679 |
assert( _age_index == -1, "pre-condition" ); |
|
680 |
} |
|
681 |
} |
|
682 |
||
683 |
void set_young() { set_young_type(Young); } |
|
684 |
||
685 |
void set_survivor() { set_young_type(Survivor); } |
|
686 |
||
687 |
void set_not_young() { set_young_type(NotYoung); } |
|
688 |
||
689 |
// Determine if an object has been allocated since the last |
|
690 |
// mark performed by the collector. This returns true iff the object |
|
691 |
// is within the unmarked area of the region. |
|
692 |
bool obj_allocated_since_prev_marking(oop obj) const { |
|
693 |
return (HeapWord *) obj >= prev_top_at_mark_start(); |
|
694 |
} |
|
695 |
bool obj_allocated_since_next_marking(oop obj) const { |
|
696 |
return (HeapWord *) obj >= next_top_at_mark_start(); |
|
697 |
} |
|
698 |
||
699 |
// For parallel heapRegion traversal. |
|
700 |
bool claimHeapRegion(int claimValue); |
|
701 |
jint claim_value() { return _claimed; } |
|
702 |
// Use this carefully: only when you're sure no one is claiming... |
|
703 |
void set_claim_value(int claimValue) { _claimed = claimValue; } |
|
704 |
||
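
  // Claiming is expected to be a single atomic compare-and-swap on
  // _claimed; a minimal sketch (assuming Atomic::cmpxchg, not the
  // actual .cpp body):
  //
  //   bool HeapRegion::claimHeapRegion(int claimValue) {
  //     jint current = _claimed;
  //     if (current != claimValue) {
  //       jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
  //       if (res == current) return true;  // we installed the claim
  //     }
  //     return false;                       // already claimed
  //   }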

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl);

  // The region "mr" is entirely in "this", and starts and ends at block
  // boundaries. The caller declares that all the contained blocks are
  // coalesced into one.
  void declare_filled_region_to_BOT(MemRegion mr) {
    _offsets.single_block(mr.start(), mr.end());
  }

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does not do object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  // Returns the zero-fill-state of the current region.
  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  Thread* zero_filler() { return _zero_filler; }

  // Indicate that the contents of the region are unknown, and therefore
  // might require zero-filling.
  void set_zero_fill_needed() {
    set_zero_fill_state_work(NotZeroFilled);
  }
  void set_zero_fill_in_progress(Thread* t) {
    set_zero_fill_state_work(ZeroFilling);
    _zero_filler = t;
  }
  void set_zero_fill_complete();
  void set_zero_fill_allocated() {
    set_zero_fill_state_work(Allocated);
  }

  void set_zero_fill_state_work(ZeroFillState zfs);

  // This is called when a full collection shrinks the heap.
  // We want to set the heap region to a value which says
  // it is no longer part of the heap. For now, we'll let "NotZF" fill
  // that role.
  void reset_zero_fill() {
    set_zero_fill_state_work(NotZeroFilled);
    _zero_filler = NULL;
  }

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;

#ifdef DEBUG
  HeapWord* allocate(size_t size);
#endif
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
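
// As a reading aid, a hypothetical closure that counts the regions
// currently in the collection set; an iteration entry point such as
// G1CollectedHeap::heap_region_iterate() (assumed here) applies it to
// each region until doHeapRegion() returns true:
//
//   class CountCSetRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountCSetRegionsClosure() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->in_collection_set()) _count++;
//       return false;  // never terminate early; visit every region
//     }
//     size_t count() const { return _count; }
//   };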

// A linked list of heap regions. It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
 protected:
  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  virtual void set_next(HeapRegion* chr,
                        HeapRegion* new_next) = 0;

  HeapRegion* _hd;
  HeapRegion* _tl;
  size_t _sz;

  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
 public:
  void reset() {
    _hd = NULL;
    _tl = NULL;
    _sz = 0;
  }
  HeapRegion* hd() { return _hd; }
  HeapRegion* tl() { return _tl; }
  size_t sz() { return _sz; }
  size_t length();

  bool well_formed() {
    return
      ((hd() == NULL && tl() == NULL && sz() == 0)
       || (hd() != NULL && tl() != NULL && sz() > 0))
      && (sz() == length());
  }
  virtual void insert_before_head(HeapRegion* r);
  void prepend_list(RegionList* new_list);
  virtual HeapRegion* pop();
  void dec_sz() { _sz--; }
  // Requires that "r" is an element of the list, and is not the tail.
  void delete_after(HeapRegion* r);
};

class EmptyNonHRegionList: public RegionList {
 protected:
  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  EmptyNonHRegionList() : RegionList() {}

 public:
  void insert_before_head(HeapRegion* r) {
    //    assert(r->is_empty(), "Better be empty");
    assert(!r->isHumongous(), "Better not be humongous.");
    RegionList::insert_before_head(r);
  }
  void prepend_list(EmptyNonHRegionList* new_list) {
    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
    //           "Better be empty");
    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
           "Better not be humongous.");
    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
    //           "Better be empty");
    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
           "Better not be humongous.");
    RegionList::prepend_list(new_list);
  }
};

class UncleanRegionList: public EmptyNonHRegionList {
 public:
  HeapRegion* get_next(HeapRegion* hr) {
    return hr->next_from_unclean_list();
  }
  void set_next(HeapRegion* hr, HeapRegion* new_next) {
    hr->set_next_on_unclean_list(new_next);
  }

  UncleanRegionList() : EmptyNonHRegionList() {}

  void insert_before_head(HeapRegion* r) {
    assert(!r->is_on_free_list(),
           "Better not already be on free list");
    assert(!r->is_on_unclean_list(),
           "Better not already be on unclean list");
    r->set_zero_fill_needed();
    r->set_on_unclean_list(true);
    EmptyNonHRegionList::insert_before_head(r);
  }
  void prepend_list(UncleanRegionList* new_list) {
    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    EmptyNonHRegionList::prepend_list(new_list);
  }
  HeapRegion* pop() {
    HeapRegion* res = RegionList::pop();
    if (res != NULL) res->set_on_unclean_list(false);
    return res;
  }
};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SERIALGC