changeset:   2252:703d28e44a42
author:      tonyp
date:        Wed, 18 Mar 2009 11:37:48 -0400
parent:      2154:72a9b7284ccf
parent:      2250:36d1eb1bb075
child:       2260:219013f6a311
permissions: -rw-r--r--

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1RemSet.cpp.incl"

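// When CARD_REPEAT_HISTO is non-zero, a byte per card counts how many
// times each card is refined between pauses; the counts are folded into
// the card_repeat_count histogram and reset at the start of each
// evacuation pause.  Debug instrumentation only.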
#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif

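// Wraps an OopsInHeapRegionClosure and forwards only those oops that
// point into the collection set.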
class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};

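// Applies the filtering closure above to every region outside the
// collection set.  For a humongous object only the "starts humongous"
// region holds the object header, so only that region is iterated.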
class IntoCSRegionClosure: public HeapRegionClosure {
  IntoCSOopClosure _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(g1, blk) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set()) {
      _blk.set_region(r);
      if (r->isHumongous()) {
        if (r->startsHumongous()) {
          oop obj = oop(r->bottom());
          obj->oop_iterate(&_blk);
        }
      } else {
        r->oop_before_save_marks_iterate(&_blk);
      }
    }
    return false;
  }
};

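// The "stupid" rem set keeps no remembered sets at all: references into
// the collection set are found by walking every region in the heap.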
void
StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            int worker_i) {
  IntoCSRegionClosure rc(_g1, oc);
  _g1->heap_region_iterate(&rc);
}

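// Rebuilds remembered-set entries by visiting, in each region outside the
// collection set, the oop locations the mod-ref barrier set recorded as
// modified.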
class UpdateRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
  UpdateRSOopClosure _cl;
  int _worker_i;
public:
  UpdateRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _mr_bs(g1->mr_bs()),
    _worker_i(worker_i),
    _g1h(g1)
  {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set() && !r->continuesHumongous()) {
      _cl.set_from(r);
      r->set_next_filter_kind(HeapRegionDCTOC::OutOfRegionFilterKind);
      _mr_bs->mod_oop_in_space_iterate(r, &_cl, true, true);
    }
    return false;
  }
};

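// Verification: once the remembered sets are up to date, an oop reachable
// from a clean card should not point into the collection set.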
class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};

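// _new_refs[i] accumulates, for worker i, oop locations updated during an
// evacuation pause whose targets may still need remembered-set entries;
// they are revisited by scanNewRefsRS() and new_refs_iterate() below.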
HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _par_traversal_in_progress(false), _new_refs(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192, true);
  }
}

HRInto_G1RemSet::~HRInto_G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}

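// Scans, for each collection-set region, the cards named by the region's
// remembered set, applying the given closure to the objects on each card.
// Cards are claimed through the card table so no card is scanned twice.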
class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS *_ct_bs;
  int _worker_i;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion *r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space *sp = SharedHeap::heap()->space_containing(card_start);
    // Restrict scanning to the used area at the save marks: the closure
    // is not idempotent, so we shouldn't look at objects allocated
    // during the GC.
    MemRegion sm_region = sp->used_region_at_save_marks();
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }

  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card " SIZE_FORMAT ": "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }

  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    // If we didn't return above, then
    //   _try_claimed || hrrs->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;
    while (iter->has_next(card_index)) {
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);

#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->in_collection_set()) {
        // If the card is dirty, then we will scan it during updateRS.
        if (!_ct_bs->is_card_claimed(card_index) &&
            !_ct_bs->is_card_dirty(card_index)) {
          assert(_ct_bs->is_card_clean(card_index) ||
                 _ct_bs->is_card_claimed(card_index) ||
                 _ct_bs->is_card_deferred(card_index),
                 "Card is either clean, claimed or deferred");
          if (_ct_bs->claim_card(card_index))
            scanCard(card_index, card_region);
        }
      }
    }
    hrrs->set_iter_complete();
    return false;
  }
  // Set all cards back to clean.
  void cleanup() { _g1h->cleanUpCardTable(); }
  size_t cards_done() { return _cards_done; }
  size_t cards_looked_up() { return _cards; }
};

// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//   n collection set regions
//   p threads
// then thread t will start at region t * floor(n/p).

HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}

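// Scan the remembered sets of the collection-set regions.  The first pass
// claims regions; the second, with _try_claimed set, helps finish regions
// that another worker claimed but has not yet completed.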
void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = calculateStartRegion(worker_i);

  BufferingOopsInHeapRegionClosure boc(oc);
  ScanRSClosure scanRScl(&boc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  boc.done();
  double closure_app_time_sec = boc.closure_app_seconds();
  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                            closure_app_time_sec;
  double closure_app_time_ms = closure_app_time_sec * 1000.0;

  assert(_cards_scanned != NULL, "invariant");
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);

  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
  if (scan_new_refs_time_ms > 0.0) {
    closure_app_time_ms += scan_new_refs_time_ms;
  }

  _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
}

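// Bring remembered sets up to date: either drain the remaining dirty-card
// log entries (the G1RSBarrierUseQueue path) or do a full traversal of
// modified oops outside the collection set.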
void HRInto_G1RemSet::updateRS(int worker_i) {
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();

  double start = os::elapsedTime();
  _g1p->record_update_rs_start_time(worker_i, start * 1000.0);

  if (G1RSBarrierUseQueue && !cg1r->do_traversal()) {
    // Apply the appropriate closure to all remaining log entries.
    _g1->iterate_dirty_card_closure(false, worker_i);
    // Now there should be no dirty cards.
    if (G1RSLogCheckCardTable) {
      CountNonCleanMemRegionClosure cl(_g1);
      _ct_bs->mod_card_iterate(&cl);
      // XXX This isn't true any more: keeping cards of young regions
      // marked dirty broke it.  Need some reasonable fix.
      guarantee(cl.n() == 0, "Card table should be clean.");
    }
  } else {
    UpdateRSOutOfRegionClosure update_rs(_g1, worker_i);
    _g1->heap_region_iterate(&update_rs);
    // We did a traversal; no further one is necessary.
    if (G1RSBarrierUseQueue) {
      assert(cg1r->do_traversal(), "Or we shouldn't have gotten here.");
      cg1r->set_pya_cancel();
    }
    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }
  }
  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}

#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() { return _count; }
};
#endif

class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    // Find the highest non-empty bin; stopping at mx > 0 avoids reading
    // _histo[-1] when every bin is empty.
    int mx = N;
    while (mx > 0) {
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr("  <= %8d   %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr("  %8d - %8d   %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr("  > %8d   %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
  }
};

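// Revisit the locations recorded in _new_refs[worker_i] during the pause:
// any that still point into the collection set have not been reached via
// a remembered set, so apply the oop closure to them here.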
void
HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
                               int worker_i) {
  double scan_new_refs_start_sec = os::elapsedTime();
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
    oop* p = _new_refs[worker_i]->at(i);
    oop obj = *p;
    // *p was in the collection set when p was pushed on "_new_refs", but
    // another thread may have processed this location from an RS, so it
    // might not point into the CS any longer.  If so, it's obviously been
    // processed, and we don't need to do anything further.
    if (g1h->obj_in_cs(obj)) {
      HeapRegion* r = g1h->heap_region_containing(p);

      DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
      oc->set_region(r);
      // If "p" has already been processed concurrently, this is
      // idempotent.
      oc->do_oop(p);
    }
  }
  _g1p->record_scan_new_refs_time(worker_i,
                                  (os::elapsedTime() - scan_new_refs_start_sec)
                                  * 1000.0);
}

void HRInto_G1RemSet::set_par_traversal(bool b) {
  _par_traversal_in_progress = b;
  HeapRegionRemSet::set_par_traversal(b);
}

void HRInto_G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                             int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of " SIZE_FORMAT " RS counts is %f, max is " SIZE_FORMAT ", "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  if (ParallelGCThreads > 0) {
    // The two flags below were introduced temporarily to serialize
    // the updating and scanning of remembered sets.  There are some
    // race conditions when these two operations are done in parallel
    // and they are causing failures.  When we resolve said race
    // conditions, we'll revert back to parallel remembered set
    // updating and scanning.  See CRs 6677707 and 6677708.
    if (G1EnableParallelRSetUpdating || (worker_i == 0)) {
      updateRS(worker_i);
      scanNewRefsRS(oc, worker_i);
    }
    if (G1EnableParallelRSetScanning || (worker_i == 0)) {
      scanRS(oc, worker_i);
    }
  } else {
    assert(worker_i == 0, "invariant");
    updateRS(0);
    scanNewRefsRS(oc, 0);
    scanRS(oc, 0);
  }
}

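// Run once before the per-worker oops_into_collection_set_do() calls:
// stop concurrent refinement racing with the pause, flush the mutators'
// dirty-card logs, and set up per-worker state.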
void HRInto_G1RemSet::
prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->collection_set_iterate(&cl);
#endif
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
  if (ParallelGCThreads > 0) {
    set_par_traversal(true);
    _seq_task->set_par_threads((int)n_workers());
    if (cg1r->do_traversal()) {
      updateRS(0);
      // Have to do this again after updaters
      cleanupHRRS();
    }
  }
  guarantee(_cards_scanned == NULL, "invariant");
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}

class cleanUpIteratorsClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion *r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    hrrs->init_for_par_iteration();
    return false;
  }
};

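// If evacuation fails, objects that did not move retain references into
// the collection set whose remembered-set entries must be restored.  This
// variant adds each such reference to the target region's remembered set
// immediately.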
class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    HeapRegion* to = _g1->heap_region_containing(*p);
    if (to->in_collection_set()) {
      if (to->rem_set()->add_reference(p, 0)) {
        _g1->schedule_popular_region_evac(to);
      }
    }
  }
};

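// The deferred variant instead marks the referencing card "deferred" and
// enqueues it on a dirty-card queue, leaving the remembered-set update to
// later processing (used when G1DeferredRSUpdate is enabled).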
class UpdateRSetOopsIntoCSDeferred : public OopClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
  DirtyCardQueue* _dcq;
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

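// Apply cl to every location any worker recorded in _new_refs during the
// pause.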
void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
  for (size_t i = 0; i < n_workers(); i++) {
    for (int j = 0; j < _new_refs[i]->length(); j++) {
      oop* p = _new_refs[i]->at(j);
      cl->do_oop(p);
    }
  }
}

void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee(_cards_scanned != NULL, "invariant");
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();
  if (ParallelGCThreads > 0) {
    ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
    if (cg1r->do_traversal()) {
      cg1r->cg1rThread()->set_do_traversal(false);
    }
    set_par_traversal(false);
  }

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into
    // the collection set.
    if (G1DeferredRSUpdate) {
      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
      new_refs_iterate(&deferred_update);
    } else {
      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
      new_refs_iterate(&immediate_update);
    }
  }
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i]->clear();
  }

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
}

class UpdateRSObjectClosure: public ObjectClosure {
  UpdateRSOopClosure* _update_rs_oop_cl;
public:
  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    _update_rs_oop_cl(update_rs_oop_cl) {}
  void do_object(oop obj) {
    obj->oop_iterate(_update_rs_oop_cl);
  }
};

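// Scrubbing removes remembered-set entries that refer to dead objects,
// using the region and card liveness bitmaps produced by concurrent
// marking.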
class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}

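// Traversal-based concurrent refinement: for each region outside the
// collection set, clear runs of dirty cards and rescan the corresponding
// object ranges, adding any needed remembered-set entries.  Returning
// true aborts the heap iteration (on a restart or cancel request).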
class ConcRefineRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  ConcurrentGCThread* _cgc_thrd;
  ConcurrentG1Refine* _cg1r;
  unsigned _cards_processed;
  UpdateRSOopClosure _update_rs_oop_cl;
public:
  ConcRefineRegionClosure(CardTableModRefBS* ctbs,
                          ConcurrentG1Refine* cg1r,
                          HRInto_G1RemSet* g1rs) :
    _ctbs(ctbs), _cg1r(cg1r), _cgc_thrd(cg1r->cg1rThread()),
    _update_rs_oop_cl(g1rs), _cards_processed(0),
    _g1h(G1CollectedHeap::heap())
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set() &&
        !r->continuesHumongous() &&
        !r->is_young()) {
      _update_rs_oop_cl.set_from(r);
      UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);

      // For each run of dirty card in the region:
      //   1) Clear the cards.
      //   2) Process the range corresponding to the run, adding any
      //      necessary RS entries.
      // 1 must precede 2, so that a concurrent modification redirties the
      // card.  If a processing attempt does not succeed, because it runs
      // into an unparseable region, we will do binary search to find the
      // beginning of the next parseable region.
      HeapWord* startAddr = r->bottom();
      HeapWord* endAddr = r->used_region().end();
      HeapWord* lastAddr;
      HeapWord* nextAddr;

      for (nextAddr = lastAddr = startAddr;
           nextAddr < endAddr;
           nextAddr = lastAddr) {
        MemRegion dirtyRegion;

        // Get and clear dirty region from card table
        MemRegion next_mr(nextAddr, endAddr);
        dirtyRegion =
          _ctbs->dirty_card_range_after_reset(
            next_mr,
            true, CardTableModRefBS::clean_card_val());
        assert(dirtyRegion.start() >= nextAddr,
               "returned region inconsistent?");

        if (!dirtyRegion.is_empty()) {
          HeapWord* stop_point =
            r->object_iterate_mem_careful(dirtyRegion,
                                          &update_rs_obj_cl);
          if (stop_point == NULL) {
            lastAddr = dirtyRegion.end();
            _cards_processed +=
              (int) (dirtyRegion.word_size() / CardTableModRefBS::card_size_in_words);
          } else {
            // We're going to skip one or more cards that we can't parse.
            HeapWord* next_parseable_card =
              r->next_block_start_careful(stop_point);
            // Round this up to a card boundary.
            next_parseable_card =
              _ctbs->addr_for(_ctbs->byte_after_const(next_parseable_card));
            // Now we invalidate the intervening cards so we'll see them
            // again.
            MemRegion remaining_dirty =
              MemRegion(stop_point, dirtyRegion.end());
            MemRegion skipped =
              MemRegion(stop_point, next_parseable_card);
            _ctbs->invalidate(skipped.intersection(remaining_dirty));

            // Now start up again where we can parse.
            lastAddr = next_parseable_card;

            // Count how many we did completely.
            _cards_processed +=
              (stop_point - dirtyRegion.start()) /
              CardTableModRefBS::card_size_in_words;
          }
          // Allow interruption at regular intervals.
          // (Might need to make them more regular, if we get big
          // dirty regions.)
          if (_cgc_thrd != NULL) {
            if (_cgc_thrd->should_yield()) {
              _cgc_thrd->yield();
              switch (_cg1r->get_pya()) {
              case PYA_continue:
                // This may have changed: re-read.
                endAddr = r->used_region().end();
                continue;
              case PYA_restart: case PYA_cancel:
                return true;
              }
            }
          }
        } else {
          break;
        }
      }
    }
    // A good yield opportunity.
    if (_cgc_thrd != NULL) {
      if (_cgc_thrd->should_yield()) {
        _cgc_thrd->yield();
        switch (_cg1r->get_pya()) {
        case PYA_restart: case PYA_cancel:
          return true;
        default:
          break;
        }
      }
    }
    return false;
  }

  unsigned cards_processed() { return _cards_processed; }
};

void HRInto_G1RemSet::concurrentRefinementPass(ConcurrentG1Refine* cg1r) {
  ConcRefineRegionClosure cr_cl(ct_bs(), cg1r, this);
  _g1->heap_region_iterate(&cr_cl);
  _conc_refine_traversals++;
  _conc_refine_cards += cr_cl.cards_processed();
}

static IntHistogram out_of_histo(50, 50);

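// Refine one card taken from a dirty-card queue: clean the card, then
// scan the objects it covers, adding remembered-set entries for the
// references found.  Cards in young or collection-set regions are
// skipped, as explained in the comments below.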
void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    return;  // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them?  When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck.  So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied.  This is unlikely,
  // and it doesn't happen often, but it can happen.  So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon.  This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed.  Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return;
  }

  // Should we defer it?
  if (_cg1r->use_cache()) {
    card_ptr = _cg1r->cache_insert(card_ptr);
    // If it was not an eviction, nothing to do.
    if (card_ptr == NULL) return;

    // OK, we have to reset the card start, region, etc.
    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);
    if (r == NULL) {
      guarantee(_g1->is_in_permanent(start), "Or else where?");
      return;  // Not in the G1 heap (might be in perm, for example.)
    }
    guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
  }

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.
  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl);
  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }
}

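// Gathers, across all regions, the total and maximum remembered-set
// memory footprint and total occupancy, for print_summary_info().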
class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};

void HRInto_G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  ConcurrentG1RefineThread* cg1r_thrd =
    g1->concurrent_g1_refine()->cg1rThread();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr("  # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr("  # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards in "
                         "%5.2fs.",
                         _conc_refine_cards, cg1r_thrd->vtime_accum());

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr("  Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by conc RS thread.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr("  Did %d concurrent refinement traversals.",
                         _conc_refine_traversals);
  if (!G1RSBarrierUseQueue) {
    gclog_or_tty->print_cr("  Scanned %8.2f cards/traversal.",
                           _conc_refine_traversals > 0 ?
                           (float)_conc_refine_cards/(float)_conc_refine_traversals :
                           0);
  }
  gclog_or_tty->print_cr("");
  if (G1UseHRIntoRS) {
    HRRSStatsIter blk;
    g1->heap_region_iterate(&blk);
    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
                           "  Max = " SIZE_FORMAT "K.",
                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
                           " free_lists = " SIZE_FORMAT "K.",
                           HeapRegionRemSet::static_mem_size()/K,
                           HeapRegionRemSet::fl_mem_size()/K);
    gclog_or_tty->print_cr("    " SIZE_FORMAT " occupied cards represented.",
                           blk.occupied());
    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                           " %s, cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                           (blk.max_mem_sz_region()->popular() ? "POP" : ""),
                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
    gclog_or_tty->print_cr("    Did %d coarsenings.",
                           HeapRegionRemSet::n_coarsenings());
  }
}

void HRInto_G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC)
      && !_g1->full_collection()) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    updateRS(0);
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}