26 #define SHARE_GC_G1_HEAPREGIONREMSET_HPP |
26 #define SHARE_GC_G1_HEAPREGIONREMSET_HPP |
27 |
27 |
28 #include "gc/g1/g1CodeCacheRemSet.hpp" |
28 #include "gc/g1/g1CodeCacheRemSet.hpp" |
29 #include "gc/g1/g1FromCardCache.hpp" |
29 #include "gc/g1/g1FromCardCache.hpp" |
30 #include "gc/g1/sparsePRT.hpp" |
30 #include "gc/g1/sparsePRT.hpp" |
|
31 #include "utilities/bitMap.hpp" |
31 |
32 |
// Remembered set for a heap region. Represents a set of "cards" that
// contain pointers into the owner heap region. Cards are defined somewhat
// abstractly, in terms of what the "BlockOffsetTable" in use can parse.
35 |
36 |
36 class G1CollectedHeap; |
37 class G1CollectedHeap; |
37 class G1BlockOffsetTable; |
38 class G1BlockOffsetTable; |
38 class G1CardLiveData; |
39 class G1CardLiveData; |
39 class HeapRegion; |
40 class HeapRegion; |
40 class HeapRegionRemSetIterator; |
|
41 class PerRegionTable; |
41 class PerRegionTable; |
42 class SparsePRT; |
42 class SparsePRT; |
43 class nmethod; |
43 class nmethod; |
44 |
44 |
// The "_coarse_map" is a bitmap with one bit for each region, where set
// its _coarse_map bit is set, so that the card we were attempting to add
// is represented. If a deleted PRT is re-used, a thread adding a bit,
// thinking the PRT is for a different region, does no harm.
68 |
68 |
69 class OtherRegionsTable { |
69 class OtherRegionsTable { |
70 friend class HeapRegionRemSetIterator; |
|
71 |
|
72 G1CollectedHeap* _g1h; |
70 G1CollectedHeap* _g1h; |
73 Mutex* _m; |
71 Mutex* _m; |
74 |
72 |
75 // These are protected by "_m". |
73 // These are protected by "_m". |
76 CHeapBitMap _coarse_map; |
74 CHeapBitMap _coarse_map; |
123 |
121 |
124 public: |
122 public: |
125 // Create a new remembered set. The given mutex is used to ensure consistency. |
123 // Create a new remembered set. The given mutex is used to ensure consistency. |
126 OtherRegionsTable(Mutex* m); |
124 OtherRegionsTable(Mutex* m); |
127 |
125 |
|
126 template <class Closure> |
|
127 void iterate(Closure& v); |
|
128 |
128 // Returns the card index of the given within_region pointer relative to the bottom |
129 // Returns the card index of the given within_region pointer relative to the bottom |
129 // of the given heap region. |
130 // of the given heap region. |
130 static CardIdx_t card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr); |
131 static CardIdx_t card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr); |
// Adds the reference from "from" to this remembered set.
132 void add_reference(OopOrNarrowOopStar from, uint tid); |
133 void add_reference(OopOrNarrowOopStar from, uint tid); |
155 |
156 |
156 // Clear the entire contents of this remembered set. |
157 // Clear the entire contents of this remembered set. |
157 void clear(); |
158 void clear(); |
158 }; |
159 }; |
159 |
160 |
|
161 class PerRegionTable: public CHeapObj<mtGC> { |
|
162 friend class OtherRegionsTable; |
|
163 |
|
164 HeapRegion* _hr; |
|
165 CHeapBitMap _bm; |
|
166 jint _occupied; |
|
167 |
|
168 // next pointer for free/allocated 'all' list |
|
169 PerRegionTable* _next; |
|
170 |
|
171 // prev pointer for the allocated 'all' list |
|
172 PerRegionTable* _prev; |
|
173 |
|
174 // next pointer in collision list |
|
175 PerRegionTable * _collision_list_next; |
|
176 |
|
177 // Global free list of PRTs |
|
178 static PerRegionTable* volatile _free_list; |
|
179 |
|
180 protected: |
|
181 PerRegionTable(HeapRegion* hr) : |
|
182 _hr(hr), |
|
183 _bm(HeapRegion::CardsPerRegion, mtGC), |
|
184 _occupied(0), |
|
185 _next(NULL), _prev(NULL), |
|
186 _collision_list_next(NULL) |
|
187 {} |
|
188 |
|
189 inline void add_card_work(CardIdx_t from_card, bool par); |
|
190 |
|
191 inline void add_reference_work(OopOrNarrowOopStar from, bool par); |
|
192 |
|
193 public: |
|
194 // We need access in order to union things into the base table. |
|
195 BitMap* bm() { return &_bm; } |
|
196 |
|
197 HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); } |
|
198 |
|
199 jint occupied() const { |
|
200 // Overkill, but if we ever need it... |
|
201 // guarantee(_occupied == _bm.count_one_bits(), "Check"); |
|
202 return _occupied; |
|
203 } |
|
204 |
|
205 void init(HeapRegion* hr, bool clear_links_to_all_list); |
|
206 |
|
207 inline void add_reference(OopOrNarrowOopStar from); |
|
208 |
|
209 inline void seq_add_reference(OopOrNarrowOopStar from); |
|
210 |
|
211 inline void add_card(CardIdx_t from_card_index); |
|
212 |
|
213 void seq_add_card(CardIdx_t from_card_index); |
|
214 |
|
215 // (Destructively) union the bitmap of the current table into the given |
|
216 // bitmap (which is assumed to be of the same size.) |
|
217 void union_bitmap_into(BitMap* bm) { |
|
218 bm->set_union(_bm); |
|
219 } |
|
220 |
|
221 // Mem size in bytes. |
|
222 size_t mem_size() const { |
|
223 return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize; |
|
224 } |
|
225 |
|
226 // Requires "from" to be in "hr()". |
|
227 bool contains_reference(OopOrNarrowOopStar from) const { |
|
228 assert(hr()->is_in_reserved(from), "Precondition."); |
|
229 size_t card_ind = pointer_delta(from, hr()->bottom(), |
|
230 G1CardTable::card_size); |
|
231 return _bm.at(card_ind); |
|
232 } |
|
233 |
|
234 // Bulk-free the PRTs from prt to last, assumes that they are |
|
235 // linked together using their _next field. |
|
236 static void bulk_free(PerRegionTable* prt, PerRegionTable* last) { |
|
237 while (true) { |
|
238 PerRegionTable* fl = _free_list; |
|
239 last->set_next(fl); |
|
240 PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl); |
|
241 if (res == fl) { |
|
242 return; |
|
243 } |
|
244 } |
|
245 ShouldNotReachHere(); |
|
246 } |
|
247 |
|
248 static void free(PerRegionTable* prt) { |
|
249 bulk_free(prt, prt); |
|
250 } |
|
251 |
|
252 // Returns an initialized PerRegionTable instance. |
|
253 static PerRegionTable* alloc(HeapRegion* hr); |
|
254 |
|
255 PerRegionTable* next() const { return _next; } |
|
256 void set_next(PerRegionTable* next) { _next = next; } |
|
257 PerRegionTable* prev() const { return _prev; } |
|
258 void set_prev(PerRegionTable* prev) { _prev = prev; } |
|
259 |
|
260 // Accessor and Modification routines for the pointer for the |
|
261 // singly linked collision list that links the PRTs within the |
|
262 // OtherRegionsTable::_fine_grain_regions hash table. |
|
263 // |
|
264 // It might be useful to also make the collision list doubly linked |
|
265 // to avoid iteration over the collisions list during scrubbing/deletion. |
|
266 // OTOH there might not be many collisions. |
|
267 |
|
268 PerRegionTable* collision_list_next() const { |
|
269 return _collision_list_next; |
|
270 } |
|
271 |
|
272 void set_collision_list_next(PerRegionTable* next) { |
|
273 _collision_list_next = next; |
|
274 } |
|
275 |
|
276 PerRegionTable** collision_list_next_addr() { |
|
277 return &_collision_list_next; |
|
278 } |
|
279 |
|
280 static size_t fl_mem_size() { |
|
281 PerRegionTable* cur = _free_list; |
|
282 size_t res = 0; |
|
283 while (cur != NULL) { |
|
284 res += cur->mem_size(); |
|
285 cur = cur->next(); |
|
286 } |
|
287 return res; |
|
288 } |
|
289 |
|
290 static void test_fl_mem_size(); |
|
291 }; |
|
292 |
160 class HeapRegionRemSet : public CHeapObj<mtGC> { |
293 class HeapRegionRemSet : public CHeapObj<mtGC> { |
161 friend class VMStructs; |
294 friend class VMStructs; |
162 friend class HeapRegionRemSetIterator; |
|
163 |
295 |
164 private: |
296 private: |
165 G1BlockOffsetTable* _bot; |
297 G1BlockOffsetTable* _bot; |
166 |
298 |
167 // A set of code blobs (nmethods) whose code contains pointers into |
299 // A set of code blobs (nmethods) whose code contains pointers into |
180 HeapRegionRemSet(G1BlockOffsetTable* bot, HeapRegion* hr); |
312 HeapRegionRemSet(G1BlockOffsetTable* bot, HeapRegion* hr); |
181 |
313 |
182 // Setup sparse and fine-grain tables sizes. |
314 // Setup sparse and fine-grain tables sizes. |
183 static void setup_remset_size(); |
315 static void setup_remset_size(); |
184 |
316 |
185 bool cardset_is_empty() const { |
|
186 return _other_regions.is_empty(); |
|
187 } |
|
188 |
|
189 bool is_empty() const { |
317 bool is_empty() const { |
190 return (strong_code_roots_list_length() == 0) && cardset_is_empty(); |
318 return (strong_code_roots_list_length() == 0) && _other_regions.is_empty(); |
191 } |
319 } |
192 |
320 |
193 bool occupancy_less_or_equal_than(size_t occ) const { |
321 bool occupancy_less_or_equal_than(size_t occ) const { |
194 return (strong_code_roots_list_length() == 0) && _other_regions.occupancy_less_or_equal_than(occ); |
322 return (strong_code_roots_list_length() == 0) && _other_regions.occupancy_less_or_equal_than(occ); |
195 } |
323 } |
|
324 |
|
325 // For each PRT in the card (remembered) set call one of the following methods |
|
326 // of the given closure: |
|
327 // |
|
328 // set_full_region_dirty(uint region_idx) - pass the region index for coarse PRTs |
|
329 // set_bitmap_dirty(uint region_idx, BitMap* bitmap) - pass the region index and bitmap for fine PRTs |
|
330 // set_cards_dirty(uint region_idx, elem_t* cards, uint num_cards) - pass region index and cards for sparse PRTs |
|
331 template <class Closure> |
|
332 inline void iterate_prts(Closure& cl); |
196 |
333 |
197 size_t occupied() { |
334 size_t occupied() { |
198 MutexLocker x(&_m, Mutex::_no_safepoint_check_flag); |
335 MutexLocker x(&_m, Mutex::_no_safepoint_check_flag); |
199 return occupied_locked(); |
336 return occupied_locked(); |
200 } |
337 } |
337 |
474 |
338 static void test(); |
475 static void test(); |
339 #endif |
476 #endif |
340 }; |
477 }; |
341 |
478 |
342 class HeapRegionRemSetIterator : public StackObj { |
|
343 private: |
|
344 // The region RSet over which we are iterating. |
|
345 HeapRegionRemSet* _hrrs; |
|
346 |
|
347 // Local caching of HRRS fields. |
|
348 const BitMap* _coarse_map; |
|
349 |
|
350 G1BlockOffsetTable* _bot; |
|
351 G1CollectedHeap* _g1h; |
|
352 |
|
353 // The number of cards yielded since initialization. |
|
354 size_t _n_yielded_fine; |
|
355 size_t _n_yielded_coarse; |
|
356 size_t _n_yielded_sparse; |
|
357 |
|
358 // Indicates what granularity of table that we are currently iterating over. |
|
359 // We start iterating over the sparse table, progress to the fine grain |
|
360 // table, and then finish with the coarse table. |
|
361 enum IterState { |
|
362 Sparse, |
|
363 Fine, |
|
364 Coarse |
|
365 }; |
|
366 IterState _is; |
|
367 |
|
368 // For both Coarse and Fine remembered set iteration this contains the |
|
369 // first card number of the heap region we currently iterate over. |
|
370 size_t _cur_region_card_offset; |
|
371 |
|
372 // Current region index for the Coarse remembered set iteration. |
|
373 int _coarse_cur_region_index; |
|
374 size_t _coarse_cur_region_cur_card; |
|
375 |
|
376 bool coarse_has_next(size_t& card_index); |
|
377 |
|
378 // The PRT we are currently iterating over. |
|
379 PerRegionTable* _fine_cur_prt; |
|
380 // Card offset within the current PRT. |
|
381 size_t _cur_card_in_prt; |
|
382 |
|
383 // Update internal variables when switching to the given PRT. |
|
384 void switch_to_prt(PerRegionTable* prt); |
|
385 bool fine_has_next(); |
|
386 bool fine_has_next(size_t& card_index); |
|
387 |
|
388 // The Sparse remembered set iterator. |
|
389 SparsePRTIter _sparse_iter; |
|
390 |
|
391 public: |
|
392 HeapRegionRemSetIterator(HeapRegionRemSet* hrrs); |
|
393 |
|
394 // If there remains one or more cards to be yielded, returns true and |
|
395 // sets "card_index" to one of those cards (which is then considered |
|
396 // yielded.) Otherwise, returns false (and leaves "card_index" |
|
397 // undefined.) |
|
398 bool has_next(size_t& card_index); |
|
399 |
|
400 size_t n_yielded_fine() { return _n_yielded_fine; } |
|
401 size_t n_yielded_coarse() { return _n_yielded_coarse; } |
|
402 size_t n_yielded_sparse() { return _n_yielded_sparse; } |
|
403 size_t n_yielded() { |
|
404 return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse(); |
|
405 } |
|
406 }; |
|
407 |
|
408 #endif // SHARE_GC_G1_HEAPREGIONREMSET_HPP |
479 #endif // SHARE_GC_G1_HEAPREGIONREMSET_HPP |