/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl"

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

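// Verification closure: applied to each reference field of a live object,
// it checks that the referent is a live object within the heap and, for
// cross-region references, that the destination region's remembered set
// (or a dirty card) records the reference. Failures are logged and counted.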
class VerifyLiveClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
public:
  VerifyLiveClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }

  void do_oop(oop* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead(_containing_obj), "Precondition");
    oop obj = *p;
    if (obj != NULL) {
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead(obj)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT
                                 " points to obj "PTR_FORMAT
                                 " not in the heap.",
                                 p, (void*) _containing_obj, (void*) obj);
        } else {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT
                                 " points to dead obj "PTR_FORMAT".",
                                 p, (void*) _containing_obj, (void*) obj);
        }
        gclog_or_tty->print_cr("Live obj:");
        _containing_obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("Bad referent:");
        obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing(p);
        HeapRegion* to   = _g1h->heap_region_containing(*p);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || (!G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                 cv_field == dirty :
                                 cv_obj == dirty || cv_field == dirty)));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

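// Walk the objects in [cur, top), applying cl to each live one. Stops at
// the last object, whose end may extend to or past top, and returns its
// start address so the caller can handle it with a bounded iteration.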
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
#ifndef PRODUCT
      if (G1VerifyMarkingInEvac) {
        VerifyLiveClosure vl_cl(g1h);
        cur_oop->oop_iterate(&vl_cl);
      }
#endif
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

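// Apply cl, wrapped in the filtering closure selected by _fk, to the
// reference fields of the live objects starting in [bottom, top). The
// first and last objects may extend beyond mr, so they are iterated
// with the MemRegion-bounded version of oop_iterate.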
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
#ifndef PRODUCT
    if (G1VerifyMarkingInEvac) {
      VerifyLiveClosure vl_cl(g1h);
      oop(bottom)->oop_iterate(&vl_cl, mr);
    }
#endif
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
#ifndef PRODUCT
      if (G1VerifyMarkingInEvac) {
        VerifyLiveClosure vl_cl(g1h);
        oop(bottom)->oop_iterate(&vl_cl, mr);
      }
#endif
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

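// Reset all region state -- humongous, collection-set, GC-alloc and
// young flags, remembered set, marking data and block offsets -- so the
// region can be reused as a fresh region.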
void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

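// GC efficiency: the number of garbage bytes this region would reclaim
// per millisecond of predicted collection time.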
// <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>

void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}

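// Attempt to claim the region for the given claim value with a single
// compare-and-swap; returns true only if this thread's CAS installed the
// value. No retry loop is needed: if _claimed changed concurrently, the
// region is being claimed by someone else.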
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

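// Binary search for the first block start at or after addr, using
// block_start_careful so the block offset table is not updated.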
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}

void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}

class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}

void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)


void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
  jint state = zero_fill_state();
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif

void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(top() == bottom() || zfs == Allocated,
         "Region must be empty, or we must be setting it to allocated.");
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
  _zfs = zfs;
}

void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}


void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}

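// Drive the zero-fill state machine to ZeroFilled: do the fill
// synchronously if no one has started it, take over or wait for the
// concurrent zero-fill thread if a fill is in progress, and fail on
// regions that have already been allocated into.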
void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

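// Apply cl to the oops of the live objects within a single card's
// MemRegion of this region. Returns the address of an unparseable point
// if one is hit, and NULL if the whole MemRegion was processed.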
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
    if (oop(cur)->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print("   ");
  if (is_young())
    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" %d", _gc_time_stamp);
  G1OffsetTableContigSpace::print_on(st);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(bool allow_dirty) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1);
  while (p < top()) {
    size_t size = oop(p)->size();
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead(obj, this)) {
        obj->verify();
        vl_cl.set_containing_obj(obj);
        obj->oop_iterate(&vl_cl);
        if (G1MaxVerifyFailures >= 0
            && vl_cl.n_failures() >= G1MaxVerifyFailures) break;
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop,
              "check offset computation");
  }
  if (vl_cl.failures()) {
    gclog_or_tty->print_cr("Heap:");
    G1CollectedHeap::heap()->print();
    gclog_or_tty->print_cr("");
  }
  if (G1VerifyConcMark &&
      G1VerifyConcMarkPrintReachable &&
      vl_cl.failures()) {
    g1->concurrent_mark()->print_prev_bitmap_reachable();
  }
  guarantee(!vl_cl.failures(), "region verification failed");
  guarantee(p == top(), "end of last object must match end of space");
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

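// If this region's GC time stamp is older than the heap's, top() has not
// moved during the current GC, so top() itself is the saved mark;
// otherwise return the mark recorded by ContiguousSpace::set_saved_mark.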
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    _gc_time_stamp = curr_gc_time_stamp;
    OrderAccess::fence();
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}

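// RegionList: a singly-linked list of heap regions threaded through the
// regions themselves, with head, tail and size maintained eagerly.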
size_t RegionList::length() {
  size_t len = 0;
  HeapRegion* cur = hd();
  DEBUG_ONLY(HeapRegion* last = NULL);
  while (cur != NULL) {
    len++;
    DEBUG_ONLY(last = cur);
    cur = get_next(cur);
  }
  assert(last == tl(), "Invariant");
  return len;
}

void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}

void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}

void RegionList::delete_after(HeapRegion* r) {
  assert(well_formed(), "Precondition");
  HeapRegion* next = get_next(r);
  assert(r != NULL, "Precondition");
  HeapRegion* next_tl = get_next(next);
  set_next(r, next_tl);
  dec_sz();
  if (next == tl()) {
    assert(next_tl == NULL, "Inv");
    _tl = r;
  }
  assert(well_formed(), "Inv");
}

HeapRegion* RegionList::pop() {
  assert(well_formed(), "Inv");
  HeapRegion* res = hd();
  if (res != NULL) {
    _hd = get_next(res);
    _sz--;
    set_next(res, NULL);
    if (sz() == 0) _tl = NULL;
  }
  assert(well_formed(), "Inv");
  return res;
}