/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
108 } |
108 } |
109 } |
109 } |
110 |
110 |
111 public: |
111 public: |
112 |
112 |
113 HeapRegion* hr() const { return _hr; } |
113 HeapRegion* hr() const { |
|
114 return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr); |
|
115 } |
114 |
116 |
115 jint occupied() const { |
117 jint occupied() const { |
116 // Overkill, but if we ever need it... |
118 // Overkill, but if we ever need it... |
117 // guarantee(_occupied == _bm.count_one_bits(), "Check"); |
119 // guarantee(_occupied == _bm.count_one_bits(), "Check"); |
118 return _occupied; |
120 return _occupied; |
121 void init(HeapRegion* hr, bool clear_links_to_all_list) { |
123 void init(HeapRegion* hr, bool clear_links_to_all_list) { |
122 if (clear_links_to_all_list) { |
124 if (clear_links_to_all_list) { |
123 set_next(NULL); |
125 set_next(NULL); |
124 set_prev(NULL); |
126 set_prev(NULL); |
125 } |
127 } |
126 _hr = hr; |
|
127 _collision_list_next = NULL; |
128 _collision_list_next = NULL; |
128 _occupied = 0; |
129 _occupied = 0; |
129 _bm.clear(); |
130 _bm.clear(); |
|
131 // Make sure that the bitmap clearing above has been finished before publishing |
|
132 // this PRT to concurrent threads. |
|
133 OrderAccess::release_store_ptr(&_hr, hr); |
130 } |
134 } |
131 |
135 |
132 void add_reference(OopOrNarrowOopStar from) { |
136 void add_reference(OopOrNarrowOopStar from) { |
133 add_reference_work(from, /*parallel*/ true); |
137 add_reference_work(from, /*parallel*/ true); |
134 } |
138 } |
355 uint cur_hrm_ind = _hr->hrm_index(); |
359 uint cur_hrm_ind = _hr->hrm_index(); |
356 |
360 |
357 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); |
361 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); |
358 |
362 |
359 if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) { |
363 if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) { |
360 assert(contains_reference(from), "We just added it!"); |
364 assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from)); |
361 return; |
365 return; |
362 } |
366 } |
363 |
367 |
364 // Note that this may be a continued H region. |
368 // Note that this may be a continued H region. |
365 HeapRegion* from_hr = _g1h->heap_region_containing(from); |
369 HeapRegion* from_hr = _g1h->heap_region_containing(from); |
366 RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index(); |
370 RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index(); |
367 |
371 |
368 // If the region is already coarsened, return. |
372 // If the region is already coarsened, return. |
369 if (_coarse_map.at(from_hrm_ind)) { |
373 if (_coarse_map.at(from_hrm_ind)) { |
370 assert(contains_reference(from), "We just added it!"); |
374 assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from)); |
371 return; |
375 return; |
372 } |
376 } |
373 |
377 |
374 // Otherwise find a per-region table to add it to. |
378 // Otherwise find a per-region table to add it to. |
375 size_t ind = from_hrm_ind & _mod_max_fine_entries_mask; |
379 size_t ind = from_hrm_ind & _mod_max_fine_entries_mask; |
386 CardIdx_t card_index = from_card - from_hr_bot_card_index; |
390 CardIdx_t card_index = from_card - from_hr_bot_card_index; |
387 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion, |
391 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion, |
388 "Must be in range."); |
392 "Must be in range."); |
389 if (G1HRRSUseSparseTable && |
393 if (G1HRRSUseSparseTable && |
390 _sparse_table.add_card(from_hrm_ind, card_index)) { |
394 _sparse_table.add_card(from_hrm_ind, card_index)) { |
391 assert(contains_reference_locked(from), "We just added it!"); |
395 assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from)); |
392 return; |
396 return; |
393 } |
397 } |
394 |
398 |
395 if (_n_fine_entries == _max_fine_entries) { |
399 if (_n_fine_entries == _max_fine_entries) { |
396 prt = delete_region_table(); |
400 prt = delete_region_table(); |
436 // possibility of concurrent reuse. But see head comment of |
440 // possibility of concurrent reuse. But see head comment of |
437 // OtherRegionsTable for why this is OK. |
441 // OtherRegionsTable for why this is OK. |
438 assert(prt != NULL, "Inv"); |
442 assert(prt != NULL, "Inv"); |
439 |
443 |
440 prt->add_reference(from); |
444 prt->add_reference(from); |
441 assert(contains_reference(from), "We just added it!"); |
445 assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from)); |
442 } |
446 } |
443 |
447 |
444 PerRegionTable* |
448 PerRegionTable* |
445 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { |
449 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { |
446 assert(ind < _max_fine_entries, "Preconditions."); |
450 assert(ind < _max_fine_entries, "Preconditions."); |