// Sentinel value: the region has already been enqueued in _dirty_region_buffer.
static const IsDirtyRegionState Dirty = 1;
// Holds a flag for every region whether it is in the _dirty_region_buffer already
// to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
IsDirtyRegionState* _in_dirty_region_buffer;
// Next free slot in _dirty_region_buffer; claimed with an atomic bump by the
// enqueue path, reset to zero between collections.
size_t _cur_dirty_region;
|
119 |
|
120 // Creates a snapshot of the current _top values at the start of collection to |
|
121 // filter out card marks that we do not want to scan. |
|
122 class G1ResetScanTopClosure : public HeapRegionClosure { |
|
123 private: |
|
124 HeapWord** _scan_top; |
|
125 public: |
|
126 G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { } |
|
127 |
|
128 virtual bool doHeapRegion(HeapRegion* r) { |
|
129 uint hrm_index = r->hrm_index(); |
|
130 if (!r->in_collection_set() && r->is_old_or_humongous()) { |
|
131 _scan_top[hrm_index] = r->top(); |
|
132 } else { |
|
133 _scan_top[hrm_index] = r->bottom(); |
|
134 } |
|
135 return false; |
|
136 } |
|
137 }; |
|
138 |
|
139 // For each region, contains the maximum top() value to be used during this garbage |
|
140 // collection. Subsumes common checks like filtering out everything but old and |
|
141 // humongous regions outside the collection set. |
|
142 // This is valid because we are not interested in scanning stray remembered set |
|
143 // entries from free or archive regions. |
|
144 HeapWord** _scan_top; |
public:
  // All buffers start out NULL/zero; the backing C-heap arrays are allocated
  // later (see the NEW_C_HEAP_ARRAY calls further down -- presumably from an
  // initialize-style method whose header is outside this view).
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }
129 |
155 |
~G1RemSetScanState() {
  if (_iter_states != NULL) {
    FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    // NOTE(review): the listing is truncated here. The remainder of the
    // destructor (freeing the other C-heap arrays) and the header of the
    // allocation method whose body continues below are not visible.
  _max_regions = max_regions;
  _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
  _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
  _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
  _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
  // Per-region scan-limit snapshot; filled in by reset() before use.
  _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
}
154 |
184 |
155 void reset() { |
185 void reset() { |
156 for (uint i = 0; i < _max_regions; i++) { |
186 for (uint i = 0; i < _max_regions; i++) { |
157 _iter_states[i] = Unclaimed; |
187 _iter_states[i] = Unclaimed; |
158 } |
188 } |
|
189 |
|
190 G1ResetScanTopClosure cl(_scan_top); |
|
191 G1CollectedHeap::heap()->heap_region_iterate(&cl); |
|
192 |
159 memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t)); |
193 memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t)); |
160 memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState)); |
194 memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState)); |
161 _cur_dirty_region = 0; |
195 _cur_dirty_region = 0; |
162 } |
196 } |
163 |
197 |
// NOTE(review): tail of the dirty-region enqueue method; its header is not
// visible in this chunk.
// Atomically flip the region's flag Clean -> Dirty; only the thread that wins
// the cmpxchg enqueues the region, keeping _dirty_region_buffer duplicate-free.
bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
if (marked_as_dirty) {
  // Claim a unique slot by atomically bumping the cursor.
  size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
  _dirty_region_buffer[allocated] = region;
}
}

// Returns the snapshot of top() taken for this region at the start of the
// collection; cards at or above this address must not be scanned.
HeapWord* scan_top(uint region_idx) const {
  return _scan_top[region_idx];
}
214 |
252 |
// Clear the card table of "dirty" regions.
void clear_card_table(WorkGang* workers) {
  if (_cur_dirty_region == 0) {
// NOTE(review): the listing is truncated here; the remainder of
// clear_card_table is not visible in this chunk.
// NOTE(review): tail of a constructor whose header is outside this view.
// Clamp the configured scan block size to at least one card.
_block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}
307 |
345 |
// Scan a single card of region r, clipped to the pre-GC allocated part of the
// region as recorded in the G1RemSetScanState snapshot.
void G1ScanRSClosure::scan_card(size_t index, HeapWord* card_start, HeapRegion *r) {
  // The memory covered by the card itself.
  MemRegion card_region(card_start, BOTConstants::N_words);
  // Limit scanning to what was allocated before the GC started, using the
  // snapshot taken by G1RemSetScanState rather than the region's live top().
  MemRegion pre_gc_allocated(r->bottom(), _scan_state->scan_top(r->hrm_index()));
  MemRegion mr = pre_gc_allocated.intersection(card_region);
  if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
    // We mark the card as "claimed" lazily (so races are possible
    // but they're benign), which reduces the number of duplicate
    // scans (the rsets of the regions in the cset can intersect).
    // NOTE(review): the rest of scan_card is not visible in this chunk.
// NOTE(review): interior of a card refinement method; both the head and the
// tail of the method lie outside this view.
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // During GC we can immediately clean the card since we will not re-enqueue stale
  // cards as we know they can be disregarded.
  *card_ptr = CardTableModRefBS::clean_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(card_start);

  // The snapshot limit subsumes the old per-card region-type and
  // collection-set filtering: regions we do not want to scan were given an
  // empty scan interval (limit == bottom) when the snapshot was taken.
  HeapWord* scan_limit = _scan_state->scan_top(r->hrm_index());
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 oops_in_heap_closure,
                                                 true,
  // NOTE(review): truncated here; the remaining constructor arguments and the
  // rest of the method are not visible in this chunk.