  40 {
  41   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
  42          "uninitialized, check declaration order");
  43   assert(_page_size != 0, "uninitialized, check declaration order");
  44   const size_t granularity = os::vm_allocation_granularity();
- 45   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
+ 45   return align_up(_guard_index + 1, MAX2(_page_size, granularity));
  46 }
  47
  48 CardTableModRefBS::CardTableModRefBS(
  49   MemRegion whole_heap,
  50   const BarrierSet::FakeRtti& fake_rtti) :
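
The function ending at line 46 sizes the card table: one byte per card, plus a trailing guard card, rounded up so the mapping ends on a page/allocation-granularity boundary. A standalone sketch of the same arithmetic follows; the 512-byte card size matches HotSpot's default but is hard-coded here as an assumption, and align_up_pow2 is a hypothetical stand-in for the VM helper.

#include <cstddef>

// Hypothetical stand-in for the VM's align_up; 'a' must be a power of two.
static size_t align_up_pow2(size_t x, size_t a) {
  return (x + a - 1) & ~(a - 1);
}

// One table byte per 512-byte card, one extra guard card at the end,
// rounded up to whole pages so the guard card's page can be committed
// separately (as the constructor does below).
size_t byte_map_size(size_t heap_bytes, size_t page_size) {
  const size_t card_size = 512;
  const size_t cards = (heap_bytes + card_size - 1) / card_size;
  const size_t guard_index = cards;        // index of the guard card
  return align_up_pow2(guard_index + 1, page_size);
}
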
 108   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
 109   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
 110   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 111
 112   jbyte* guard_card = &_byte_map[_guard_index];
-113   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
+113   uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
 114   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
 115   os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
 116                             !ExecMem, "card table last card");
 117   *guard_card = last_card;
 118
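Lines 108 and 112-117 are the heart of the setup: byte_map_base is biased by the shifted heap base so a barrier can index it with a raw address, and the page holding the last (guard) card is committed and the card marked. A minimal sketch of why the bias works; card_shift == 9, i.e. 512-byte cards, matches HotSpot's default but is an assumption here.

#include <cassert>
#include <cstdint>

int main() {
  const int card_shift = 9;                 // 512-byte cards (HotSpot default)
  static signed char byte_map[1 << 20];     // toy card table
  const uintptr_t low_bound = uintptr_t(1) << 32;  // pretend heap base

  // Subtract the shifted heap base once; afterwards every lookup is just
  // byte_map_base + (addr >> card_shift), with no per-store subtraction.
  // (Formally out-of-bounds pointer arithmetic, exactly like the original.)
  signed char* byte_map_base = byte_map - (low_bound >> card_shift);

  const uintptr_t addr = low_bound + 12345; // some address inside the heap
  signed char* card = byte_map_base + (addr >> card_shift);
  assert(card == &byte_map[(addr - low_bound) >> card_shift]);
  return 0;
}
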
 150   int res = i;
 151   _cur_covered_regions++;
 152   _covered[res].set_start(base);
 153   _covered[res].set_word_size(0);
 154   jbyte* ct_start = byte_for(base);
-155   uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
+155   uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
 156   _committed[res].set_start((HeapWord*)ct_start_aligned);
 157   _committed[res].set_word_size(0);
 158   return res;
 159 }
 160
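Every hunk in this change is the same mechanical rename: align_size_up/align_size_down and align_ptr_up/align_ptr_down become the unified align_up/align_down. The semantics are the usual power-of-two rounding; a sketch (the trailing-underscore names are stand-ins, not the VM helpers):

#include <cassert>
#include <cstdint>

static uintptr_t align_down_(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
static uintptr_t align_up_(uintptr_t x, uintptr_t a) {
  return (x + a - 1) & ~(a - 1);
}

int main() {
  const uintptr_t page = 4096;              // assumed page size
  assert(align_down_(8191, page) == 4096);
  assert(align_up_(4097, page) == 8192);
  assert(align_up_(4096, page) == 4096);    // already aligned: unchanged
  return 0;
}
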
 210   if (max_prev_end > cur_committed.end()) {
 211     cur_committed.set_end(max_prev_end);
 212   }
 213   // Align the end up to a page size (starts are already aligned).
 214   jbyte* const new_end = byte_after(new_region.last());
-215   HeapWord* new_end_aligned = (HeapWord*) align_ptr_up(new_end, _page_size);
+215   HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
 216   assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
 217   // Check the other regions (excludes "ind") to ensure that
 218   // the new_end_aligned does not intrude onto the committed
 219   // space of another region.
 220   int ri = 0;
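
Line 214 relies on byte_after: the card byte one past the card containing the last word, so [byte_for(start), byte_after(last)) spans exactly a region's cards, and aligning that end up to a page can only move it forward, which is what the line-216 assert re-checks. In miniature (underscore names are hypothetical stand-ins):

#include <cstdint>

signed char* byte_for_(signed char* biased_base, const void* p, int shift) {
  return biased_base + (uintptr_t(p) >> shift);
}
// One past the card containing p; paired with byte_for_ this bounds a
// half-open range covering exactly the cards of [start, last].
signed char* byte_after_(signed char* biased_base, const void* p, int shift) {
  return byte_for_(biased_base, p, shift) + 1;
}
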
 366   inline_write_ref_field(field, newVal, release);
 367 }
 368
 369
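Line 366 is the tail of the store path: after the reference field is written, the barrier dirties the card for the field's address. Reduced to its essentials below, as a sketch with stand-in parameters; HotSpot's release variant performs the card write as a release store, which this sketch omits.

#include <cstdint>

const signed char kDirtyCard = 0;   // dirty_card == 0 in HotSpot

void post_write_barrier(signed char* biased_base, const void* field,
                        int card_shift) {
  // Same biased-base lookup as byte_for(); one byte store per reference write.
  signed char* card = biased_base + (uintptr_t(field) >> card_shift);
  *card = kDirtyCard;
}
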
 370 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
-371   assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-372   assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+371   assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+372   assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 373   jbyte* cur  = byte_for(mr.start());
 374   jbyte* last = byte_after(mr.last());
 375   while (cur < last) {
 376     *cur = dirty_card;
 377     cur++;
 378   }
 379 }
 380
 381 void CardTableModRefBS::invalidate(MemRegion mr) {
-382   assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-383   assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+382   assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+383   assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
 384   for (int i = 0; i < _cur_covered_regions; i++) {
 385     MemRegion mri = mr.intersection(_covered[i]);
 386     if (!mri.is_empty()) dirty_MemRegion(mri);
 387   }
 388 }
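
invalidate() clips the request to each covered region and dirties every overlapping card, so a later scan treats the whole range as possibly containing interesting pointers. A toy check of that card math; the 512-byte cards and the clean_card == -1 / dirty_card == 0 values mirror HotSpot's but are hard-coded assumptions here.

#include <cassert>
#include <cstddef>
#include <cstring>

int main() {
  const int shift = 9;                      // 512-byte cards
  signed char cards[64];
  memset(cards, -1, sizeof cards);          // all clean

  // Dirty the cards for byte offsets [1000, 3000): cards 1 through 5.
  const size_t first = 1000 >> shift;
  const size_t last = (3000 - 1) >> shift;  // card of the last byte
  for (size_t i = first; i <= last; i++) cards[i] = 0;

  assert(cards[0] == -1 && cards[1] == 0);
  assert(cards[5] == 0 && cards[6] == -1);
  return 0;
}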