author       iveresov
date         Fri, 06 Mar 2009 13:50:14 -0800
changeset    2142:032f4652700c
parent       2013:49e915da0905
child        2152:99356e7f31b1
child        2105:347008ce7984
permissions  -rw-r--r--
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};
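// Illustrative only: a minimal sketch of how a HeapRegionDCTOC might be
// constructed and applied to a dirty card span. The variable names (g1h,
// hr, oop_cl, dirty_mr) are hypothetical and not part of this header.
//
//   HeapRegionDCTOC dcto_cl(g1h, hr, oop_cl,
//                           CardTableModRefBS::Precise,
//                           HeapRegionDCTOC::IntoCSFilterKind);
//   dcto_cl.do_MemRegion(dirty_mr);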
// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// well, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
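// A minimal sketch (not the actual implementation) of the read protocol the
// comment above describes; the accessor current_gc_time_stamp() stands in
// for however the heap-wide time stamp is obtained and is hypothetical:
//
//   HeapWord* saved_mark_word_sketch() const {
//     if (_gc_time_stamp == current_gc_time_stamp()) {
//       return ContiguousSpace::saved_mark_word();  // stamp matches: valid
//     } else {
//       return top();                               // stale stamp: use top
//     }
//   }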
class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;
};
class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;
  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff the region is on the unclean list, waiting to be zero filled.
  bool _is_on_unclean_list;

  // True iff the region is on the free list, ready for allocation.
  bool _is_on_free_list;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;
  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See the "sort_index" method. -1 means the region is not in the array.
  int _sort_index;

  // Means it has (or at least had) a very large RS, and should not be
  // considered for membership in a collection set.
  enum PopularityState {
    NotPopular,
    PopularPending,
    Popular
  };
  PopularityState _popularity;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    ScanOnly,                   // a region is young and scan-only
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivors
  };
  YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  jint _zfs;             // A member of ZeroFillState. Protected by ZF_lock.
  Thread* _zero_filler;  // If _zfs is ZeroFilling, the thread that (last)
                         // made it so.

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }
 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  enum SomePublicConstants {
    // HeapRegions are GrainBytes-aligned
    // and have sizes that are multiples of GrainBytes.
    LogOfHRGrainBytes = 20,
    LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
    GrainBytes = 1 << LogOfHRGrainBytes,
    GrainWords = 1 << LogOfHRGrainWords,
    MaxAge = 2, NoOfAges = MaxAge + 1
  };

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4
  };

  // Concurrent refinement requires contiguous heap regions (in which TLABs
  // might be allocated) to be zero-filled. Each region therefore has a
  // zero-fill-state.
  enum ZeroFillState {
    NotZeroFilled,
    ZeroFilling,
    ZeroFilled,
    Allocated
  };
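  // Illustrative only: the typical lifecycle of the zero-fill state, tied to
  // the setters declared further down in this class. The ordering shown is a
  // sketch of the intended progression, not a statement of every legal
  // transition:
  //
  //   NotZeroFilled  -- set_zero_fill_in_progress(t) -->  ZeroFilling
  //   ZeroFilling    -- set_zero_fill_complete()     -->  ZeroFilled
  //   ZeroFilled     -- set_zero_fill_allocated()    -->  Allocated
  //   (any state)    -- set_zero_fill_needed()       -->  NotZeroFilled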
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
      * HeapWordSize
      + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }
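  // Worked example (illustrative numbers only, assuming HeapWordSize == 8):
  // if prev_top_at_mark_start() - bottom() == 100000 words and
  // marked_bytes() == 300000, then used_at_mark_start_bytes == 800000 and
  // garbage_bytes() == 500000. With used() == 900000 (100000 bytes were
  // allocated after the mark started), max_live_bytes() == 400000, i.e. the
  // marked bytes plus everything allocated since the mark began.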
  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Causes the current region to represent a humongous object spanning "n"
  // regions.
  virtual void set_startsHumongous();

  // The regions that continue a humongous sequence should be added using
  // this method, in increasing address order.
  void set_continuesHumongous(HeapRegion* start);

  void add_continuingHumongousRegion(HeapRegion* cont);

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }
  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  bool is_reserved() {
    return popular();
  }

  bool is_on_free_list() {
    return _is_on_free_list;
  }

  void set_on_free_list(bool b) {
    _is_on_free_list = b;
  }

  HeapRegion* next_from_free_list() {
    assert(is_on_free_list(),
           "Should only invoke on free space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_free_list(),
           "Malformed Free List.");
    return _next_in_special_set;
  }

  void set_next_on_free_list(HeapRegion* r) {
    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
    _next_in_special_set = r;
  }

  bool is_on_unclean_list() {
    return _is_on_unclean_list;
  }

  void set_on_unclean_list(bool b);

  HeapRegion* next_from_unclean_list() {
    assert(is_on_unclean_list(),
           "Should only invoke on unclean space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_unclean_list(),
           "Malformed unclean List.");
    return _next_in_special_set;
  }

  void set_next_on_unclean_list(HeapRegion* r);
  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Ensure that "this" is zero-filled.
  void ensure_zero_filled();
  // This one requires that the calling thread holds ZF_mon.
  void ensure_zero_filled_locked();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_mark".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.) The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif
  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top. Note this is only valid if we have only
  // ever evacuated into this region. If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start,
           "Increase only");
    // Survivor regions will be scanned at the start of concurrent
    // marking.
    if (!is_survivor()) {
      _next_top_at_mark_start = top();
    }
  }
  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  bool popular() { return _popularity == Popular; }
  void set_popular(bool b) {
    if (b) {
      _popularity = Popular;
    } else {
      _popularity = NotPopular;
    }
  }
  bool popular_pending() { return _popularity == PopularPending; }
  void set_popular_pending(bool b) {
    if (b) {
      _popularity = PopularPending;
    } else {
      _popularity = NotPopular;
    }
  }
  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }
  // </PREDICTION>

  bool is_young() const { return _young_type != NotYoung; }
  bool is_scan_only() const { return _young_type == ScanOnly; }
  bool is_survivor() const { return _young_type == Survivor; }

  int young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void recalculate_age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    _age_index = _surv_rate_group->recalculate_age_index(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_scan_only() { set_young_type(ScanOnly); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
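  // Illustrative only: a sketch of how a parallel worker might use the claim
  // protocol during a phase that uses FinalCountClaimValue. The surrounding
  // iteration and the "process(r)" call are hypothetical:
  //
  //   if (r->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //     process(r);   // only the worker whose claim succeeded handles r
  //   }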
  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl);

  // The region "mr" is entirely in "this", and starts and ends at block
  // boundaries. The caller declares that all the contained blocks are
  // coalesced into one.
  void declare_filled_region_to_BOT(MemRegion mr) {
    _offsets.single_block(mr.start(), mr.end());
  }

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does not do object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }
  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  // Returns the zero-fill-state of the current region.
  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  Thread* zero_filler() { return _zero_filler; }

  // Indicate that the contents of the region are unknown, and therefore
  // might require zero-filling.
  void set_zero_fill_needed() {
    set_zero_fill_state_work(NotZeroFilled);
  }
  void set_zero_fill_in_progress(Thread* t) {
    set_zero_fill_state_work(ZeroFilling);
    _zero_filler = t;
  }
  void set_zero_fill_complete();
  void set_zero_fill_allocated() {
    set_zero_fill_state_work(Allocated);
  }

  void set_zero_fill_state_work(ZeroFillState zfs);

  // This is called when a full collection shrinks the heap.
  // We want to set the heap region to a value which says
  // it is no longer part of the heap. For now, we'll let "NotZF" fill
  // that role.
  void reset_zero_fill() {
    set_zero_fill_state_work(NotZeroFilled);
    _zero_filler = NULL;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // Override
  virtual void verify(bool allow_dirty) const;

#ifdef DEBUG
  HeapWord* allocate(size_t size);
#endif
};
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
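// Illustrative only: a minimal sketch of a HeapRegionClosure subclass; the
// class name and its bookkeeping are hypothetical, not part of this header:
//
//   class CountRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountRegionsClosure() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       _count++;
//       return false;   // returning false lets the iteration continue
//     }
//     size_t count() const { return _count; }
//   };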
// A linked list of heap regions. It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
protected:
  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  virtual void set_next(HeapRegion* chr,
                        HeapRegion* new_next) = 0;

  HeapRegion* _hd;
  HeapRegion* _tl;
  size_t _sz;

  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
public:
  void reset() {
    _hd = NULL;
    _tl = NULL;
    _sz = 0;
  }
  HeapRegion* hd() { return _hd; }
  HeapRegion* tl() { return _tl; }
  size_t sz() { return _sz; }
  size_t length();

  bool well_formed() {
    return
      ((hd() == NULL && tl() == NULL && sz() == 0)
       || (hd() != NULL && tl() != NULL && sz() > 0))
      && (sz() == length());
  }
  virtual void insert_before_head(HeapRegion* r);
  void prepend_list(RegionList* new_list);
  virtual HeapRegion* pop();
  void dec_sz() { _sz--; }
  // Requires that "r" is an element of the list, and is not the tail.
  void delete_after(HeapRegion* r);
};

class EmptyNonHRegionList: public RegionList {
protected:
  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  EmptyNonHRegionList() : RegionList() {}

public:
  void insert_before_head(HeapRegion* r) {
    //    assert(r->is_empty(), "Better be empty");
    assert(!r->isHumongous(), "Better not be humongous.");
    RegionList::insert_before_head(r);
  }
  void prepend_list(EmptyNonHRegionList* new_list) {
    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
    //           "Better be empty");
    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
           "Better not be humongous.");
    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
    //           "Better be empty");
    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
           "Better not be humongous.");
    RegionList::prepend_list(new_list);
  }
};

class UncleanRegionList: public EmptyNonHRegionList {
public:
  HeapRegion* get_next(HeapRegion* hr) {
    return hr->next_from_unclean_list();
  }
  void set_next(HeapRegion* hr, HeapRegion* new_next) {
    hr->set_next_on_unclean_list(new_next);
  }

  UncleanRegionList() : EmptyNonHRegionList() {}

  void insert_before_head(HeapRegion* r) {
    assert(!r->is_on_free_list(),
           "Better not already be on free list");
    assert(!r->is_on_unclean_list(),
           "Better not already be on unclean list");
    r->set_zero_fill_needed();
    r->set_on_unclean_list(true);
    EmptyNonHRegionList::insert_before_head(r);
  }
  void prepend_list(UncleanRegionList* new_list) {
    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    EmptyNonHRegionList::prepend_list(new_list);
  }
  HeapRegion* pop() {
    HeapRegion* res = RegionList::pop();
    if (res != NULL) res->set_on_unclean_list(false);
    return res;
  }
};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SERIALGC