34 #include "utilities/globalDefinitions.hpp" |
34 #include "utilities/globalDefinitions.hpp" |
35 |
35 |
// Sparse remembered set for a heap region (the "owning" region).  Maps
// indices of other regions to short sequences of cards in the other region
// that might contain pointers into the owner region.

// These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code.  This allows us to allow
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions for deletions, but do not delete
// old versions synchronously.

// NOTE(review): the lines below are side-by-side-diff residue, not
// compilable C++ -- each line carries an embedded source line number and a
// trailing '|', and most lines appear twice (once per file revision).  The
// embedded numbers jump from 49/43 to 156/150 and the closing '};' carries
// numbers 218/210, so large parts of this class (and possibly whole
// following classes) are elided from this view.  The declaration cannot be
// reconstructed safely from here, so only comments are added.
46 class SparsePRTEntry: public CHeapObj<mtGC> { |
40 class SparsePRTEntry: public CHeapObj<mtGC> { |
47 private: |
41 private: |
48 // The type of a card entry. |
42 // The type of a card entry. |
49 typedef uint16_t card_elem_t; |
43 typedef uint16_t card_elem_t; |
156 // Otherwise, returns "false" to indicate that the addition would |
150 // Otherwise, returns "false" to indicate that the addition would |
157 // overflow the entry for the region. The caller must transfer these |
151 // overflow the entry for the region. The caller must transfer these |
158 // entries to a larger-capacity representation. |
152 // entries to a larger-capacity representation. |
159 bool add_card(RegionIdx_t region_id, CardIdx_t card_index); |
153 bool add_card(RegionIdx_t region_id, CardIdx_t card_index); |
160 |
154 |
// NOTE(review): get_cards() exists in only one of the two revisions
// (its counterpart lines are empty) -- confirm against the revision this
// file is meant to track before relying on it.
161 bool get_cards(RegionIdx_t region_id, CardIdx_t* cards); |
|
162 |
|
163 bool delete_entry(RegionIdx_t region_id); |
155 bool delete_entry(RegionIdx_t region_id); |
164 |
156 |
165 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const; |
157 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const; |
166 |
158 |
167 void add_entry(SparsePRTEntry* e); |
159 void add_entry(SparsePRTEntry* e); |
218 }; |
210 }; |
219 |
211 |
// Concurrent access to a SparsePRT must be serialized by some external mutex.
221 |
213 |
222 class SparsePRTIter; |
214 class SparsePRTIter; |
223 class SparsePRTCleanupTask; |
|
224 |
215 |
// NOTE(review): this span is side-by-side-diff residue, not compilable C++.
// Each line carries an embedded source line number and a trailing '|'; most
// lines appear twice (once per revision), and the embedded numbers jump
// from 264/236 to 275/247, so part of the class body (apparently the
// add_card declaration and its comment) is elided from this view.  The two
// revisions disagree structurally: one keeps a pair of hash tables
// (_cur for iteration, _next for mutation) plus an "expanded list" of
// tables processed at cleanup; the other keeps a single _table.  The text
// is preserved byte-for-byte; only comments are added.
225 class SparsePRT { |
216 class SparsePRT { |
226 friend class SparsePRTCleanupTask; |
217 friend class SparsePRTIter; |
227 |
218 |
// NOTE(review): revision divergence -- _cur/_next pair vs single _table.
228 // Iterations are done on the _cur hash table, since they only need to |
219 RSHashTable* _table; |
229 // see entries visible at the start of a collection pause. |
|
230 // All other operations are done using the _next hash table. |
|
231 RSHashTable* _cur; |
|
232 RSHashTable* _next; |
|
233 |
220 |
234 enum SomeAdditionalPrivateConstants { |
221 enum SomeAdditionalPrivateConstants { |
235 InitialCapacity = 16 |
222 InitialCapacity = 16 |
236 }; |
223 }; |
237 |
224 |
238 void expand(); |
225 void expand(); |
239 |
226 |
// NOTE(review): the _expanded / expanded-list machinery below exists in
// only one revision (the counterpart lines are empty).
240 bool _expanded; |
|
241 |
|
242 bool expanded() { return _expanded; } |
|
243 void set_expanded(bool b) { _expanded = b; } |
|
244 |
|
245 SparsePRT* _next_expanded; |
|
246 |
|
247 SparsePRT* next_expanded() { return _next_expanded; } |
|
248 void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; } |
|
249 |
|
250 bool should_be_on_expanded_list(); |
|
251 |
|
252 static SparsePRT* volatile _head_expanded_list; |
|
253 |
|
254 public: |
227 public: |
255 SparsePRT(); |
228 SparsePRT(); |
256 |
|
257 ~SparsePRT(); |
229 ~SparsePRT(); |
258 |
230 |
// NOTE(review): the two revisions read different tables here
// (_next vs _table) -- pick whichever matches the revision being tracked.
259 size_t occupied() const { return _next->occupied_cards(); } |
231 size_t occupied() const { return _table->occupied_cards(); } |
260 size_t mem_size() const; |
232 size_t mem_size() const; |
261 |
233 |
262 // Attempts to ensure that the given card_index in the given region is in |
234 // Attempts to ensure that the given card_index in the given region is in |
263 // the sparse table. If successful (because the card was already |
235 // the sparse table. If successful (because the card was already |
264 // present, or because it was successfully added) returns "true". |
236 // present, or because it was successfully added) returns "true". |
275 bool delete_entry(RegionIdx_t region_ind); |
247 bool delete_entry(RegionIdx_t region_ind); |
276 |
248 |
277 // Clear the table, and reinitialize to initial capacity. |
249 // Clear the table, and reinitialize to initial capacity. |
278 void clear(); |
250 void clear(); |
279 |
251 |
280 // Ensure that "_cur" and "_next" point to the same table. |
|
281 void cleanup(); |
|
282 |
|
283 // Clean up all tables on the expanded list. Called single threaded. |
|
284 static void cleanup_all(); |
|
285 RSHashTable* cur() const { return _cur; } |
|
286 |
|
287 static void add_to_expanded_list(SparsePRT* sprt); |
|
288 static SparsePRT* get_from_expanded_list(); |
|
289 |
|
290 // The purpose of these three methods is to help the GC workers |
|
291 // during the cleanup pause to recreate the expanded list, purging |
|
292 // any tables from it that belong to regions that are freed during |
|
293 // cleanup (if we don't purge those tables, there is a race that |
|
294 // causes various crashes; see CR 7014261). |
|
295 // |
|
296 // We chose to recreate the expanded list, instead of purging |
|
297 // entries from it by iterating over it, to avoid this serial phase |
|
298 // at the end of the cleanup pause. |
|
299 // |
|
300 // The three methods below work as follows: |
|
301 // * reset_for_cleanup_tasks() : Nulls the expanded list head at the |
|
302 // start of the cleanup pause. |
|
303 // * do_cleanup_work() : Called by the cleanup workers for every |
|
304 // region that is not free / is being freed by the cleanup |
|
305 // pause. It creates a list of expanded tables whose head / tail |
|
306 // are on the thread-local SparsePRTCleanupTask object. |
|
307 // * finish_cleanup_task() : Called by the cleanup workers after |
|
308 // they complete their cleanup task. It adds the local list into |
|
309 // the global expanded list. It assumes that the |
|
310 // ParGCRareEvent_lock is being held to ensure MT-safety. |
|
311 static void reset_for_cleanup_tasks(); |
|
312 void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task); |
|
313 static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task); |
|
314 |
|
315 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const { |
252 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const { |
316 return _next->contains_card(region_id, card_index); |
253 return _table->contains_card(region_id, card_index); |
317 } |
254 } |
318 }; |
255 }; |
319 |
256 |
320 class SparsePRTIter: public RSHashTableIter { |
257 class SparsePRTIter: public RSHashTableIter { |
321 public: |
258 public: |
322 SparsePRTIter(const SparsePRT* sprt) : |
259 SparsePRTIter(const SparsePRT* sprt) : |
323 RSHashTableIter(sprt->cur()) {} |
260 RSHashTableIter(sprt->_table) { } |
324 |
261 |
325 bool has_next(size_t& card_index) { |
262 bool has_next(size_t& card_index) { |
326 return RSHashTableIter::has_next(card_index); |
263 return RSHashTableIter::has_next(card_index); |
327 } |
264 } |
328 }; |
265 }; |
329 |
266 |
330 // This allows each worker during a cleanup pause to create a |
|
331 // thread-local list of sparse tables that have been expanded and need |
|
332 // to be processed at the beginning of the next GC pause. This lists |
|
333 // are concatenated into the single expanded list at the end of the |
|
334 // cleanup pause. |
|
335 class SparsePRTCleanupTask { |
|
336 private: |
|
337 SparsePRT* _head; |
|
338 SparsePRT* _tail; |
|
339 |
|
340 public: |
|
341 SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { } |
|
342 |
|
343 void add(SparsePRT* sprt); |
|
344 SparsePRT* head() { return _head; } |
|
345 SparsePRT* tail() { return _tail; } |
|
346 }; |
|
347 |
|
348 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP |
267 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP |