  jbyte* const new_end = byte_after(new_region.last());
  HeapWord* new_end_aligned =
    (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
  assert(new_end_aligned >= (HeapWord*) new_end,
         "align up, but less");
  // Check the other regions (excludes "ind") to ensure that
  // the new_end_aligned does not intrude onto the committed
  // space of another region.
  int ri = 0;
  for (ri = 0; ri < _cur_covered_regions; ri++) {
    if (ri != ind) {
      if (_committed[ri].contains(new_end_aligned)) {
        // The prior check included in the assert
        // (new_end_aligned >= _committed[ri].start())
        // is redundant with the "contains" test.
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
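
// A minimal standalone sketch (not HotSpot code) of the pattern above: align
// the new end up to a page boundary, then back it off if it lands inside a
// committed region other than "ind". "MiniRegion", "align_up", and
// "clamp_new_end" are illustrative names, and the types are assumptions.

#include <cassert>
#include <cstdint>

struct MiniRegion {
  uintptr_t start;
  uintptr_t end;  // exclusive
  bool contains(uintptr_t p) const { return p >= start && p < end; }
};

static uintptr_t align_up(uintptr_t p, uintptr_t page) {
  return (p + page - 1) & ~(page - 1);  // page must be a power of two
}

static uintptr_t clamp_new_end(uintptr_t new_end, uintptr_t page,
                               const MiniRegion* committed, int n, int ind) {
  uintptr_t aligned = align_up(new_end, page);
  assert(aligned >= new_end && "align up, but less");
  for (int ri = 0; ri < n; ri++) {
    if (ri != ind && committed[ri].contains(aligned)) {
      aligned = committed[ri].start;  // retreat to the colliding region's start
      break;                          // should only collide with 1 region
    }
  }
  return aligned;
}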

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}
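
// For context, a card-table post-barrier such as inline_write_ref_field()
// conventionally dirties the card byte covering the updated field. A minimal
// sketch of that technique, assuming 512-byte cards and 0 as the "dirty"
// value; the names and constants here are assumptions, not the HotSpot ones.

#include <cstdint>

static const int         kCardShift = 9;  // log2 of an assumed 512-byte card
static const signed char kDirtyCard = 0;  // assumed encoding of "dirty"

// byte_map_base is assumed to be pre-biased so that indexing it by
// (address >> kCardShift) yields the card byte covering that address.
static inline void sketch_post_barrier(signed char* byte_map_base, void* field) {
  byte_map_base[(uintptr_t)field >> kCardShift] = kDirtyCard;
}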
|
/*
 Claimed and deferred bits are used together in G1 during the evacuation
 pause. These bits can have the following state transitions:
 1. The claimed bit can be put over any other card state, except that
    the "dirty -> dirty and claimed" transition is checked for in
    G1 code and is not used.
 2. The deferred bit can be set only if the previous state of the card
    was either clean or claimed. mark_card_deferred() is wait-free.
    We do not care whether the operation succeeds: if it does not, the
    only consequence is a duplicate entry in the update buffer caused
    by the "cache miss". So it's not worth spinning.
 */

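// The transitions described above are easier to follow with concrete values.
// A standalone sketch with assumed encodings (the real CardValues enum may
// differ): clean is all bits set, dirty is zero, and claimed/deferred are
// single bits OR-ed into a card.

enum SketchCardValues {
  kClean     = -1,           // all bits set
  kDirty     = 0,
  kClaimed   = 2,            // may be put over any non-dirty state in G1
  kDeferred  = 4,            // legal only over clean or claimed
  kCleanMask = kClean & ~15  // high bits that remain set in a clean card
};

// Transition 1: claiming replaces a clean card, otherwise ORs the bit in.
static inline signed char sketch_claim(signed char v) {
  return (v == kClean) ? (signed char)kClaimed : (signed char)(v | kClaimed);
}

// Transition 2: deferring is permitted only over clean or claimed cards.
static inline bool sketch_can_defer(signed char v) {
  return v == kClean || (v & kClaimed) != 0;
}
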
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}
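
// claim_card() above is a CAS retry loop: on failure it adopts the value
// returned by the failed compare-and-swap and retries until either the claim
// sticks or another thread has already claimed the card. A portable sketch of
// the same loop using std::atomic in place of Atomic::cmpxchg, reusing the
// illustrative encodings from the sketch above.

#include <atomic>

static bool sketch_claim_card(std::atomic<signed char>& card) {
  signed char val = card.load(std::memory_order_relaxed);
  while (val == kClean ||
         (val & (kCleanMask | kClaimed)) != kClaimed) {  // not claimed yet
    signed char new_val = sketch_claim(val);
    // On failure, compare_exchange_weak reloads 'val' with the current
    // value, mirroring "val = res;" in the loop above.
    if (card.compare_exchange_weak(val, new_val)) {
      return true;   // this thread performed the claiming transition
    }
  }
  return false;      // another thread claimed the card first
}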
|
bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

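// mark_card_deferred() above is deliberately wait-free: one CAS attempt and
// no retry loop, because (per the comment block earlier) losing the race only
// costs a duplicate entry in the update buffer. A portable sketch of the same
// single-attempt pattern, reusing the illustrative encodings above.

static bool sketch_mark_card_deferred(std::atomic<signed char>& card) {
  signed char val = card.load(std::memory_order_relaxed);
  if ((val & (kCleanMask | kDeferred)) == kDeferred) {
    return false;  // already deferred, i.e. already processed
  }
  signed char new_val = val;
  if (val == kClean) {
    new_val = (signed char)kDeferred;
  } else if (val & kClaimed) {
    new_val = (signed char)(val | kDeferred);
  }
  if (new_val != val) {
    // Single shot: whether we succeed or lose the race, we do not spin.
    card.compare_exchange_strong(val, new_val);
  }
  return true;
}
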
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,