@@ -123,14 +123,12 @@
   G1CollectedHeap* _g1h;
   CardTableModRefBS* _ctbs;
   int _histo[256];
 public:
   ClearLoggedCardTableEntryClosure() :
-    _calls(0)
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
   {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
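
Note: both logging-card-table closures (this one and RedirtyLoggedCardTableEntryClosure just below) now obtain the barrier set through the new g1_barrier_set() accessor instead of casting barrier_set() by hand in the constructor body. The accessor itself is not part of these hunks; a minimal sketch of the shape it presumably has in g1CollectedHeap.hpp (an assumption, the real declaration may differ):

  // Sketch only: centralizes the cast the old constructors did inline.
  G1SATBCardTableModRefBS* G1CollectedHeap::g1_barrier_set() {
    return (G1SATBCardTableModRefBS*) barrier_set();
  }
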
@@ -156,15 +154,12 @@
   int _calls;
   G1CollectedHeap* _g1h;
   CardTableModRefBS* _ctbs;
 public:
   RedirtyLoggedCardTableEntryClosure() :
-    _calls(0)
-  {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
-  }
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
+
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       *card_ptr = 0;
     }
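
In the redirty closure, writing 0 back into the card marks it dirty again; as far as I recall, CardTableModRefBS defines dirty_card = 0 and clean_card = -1, so an equivalent and more self-documenting spelling would be (sketch, assuming the usual static accessor is available):

  // Equivalent to "*card_ptr = 0;" above, assuming dirty_card_val() exists here.
  *card_ptr = CardTableModRefBS::dirty_card_val();
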
@@ -476,11 +471,11 @@
   }
 }

 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  CardTableModRefBS* ct_bs = g1_barrier_set();

   // Count the dirty cards at the start.
   CountNonCleanMemRegionClosure count1(this);
   ct_bs->mod_card_iterate(&count1);
   int orig_count = count1.n();
@@ -1203,11 +1198,11 @@
     return false;
   }
 };

 void G1CollectedHeap::clear_rsets_post_compaction() {
-  PostMCRemSetClearClosure rs_clear(this, mr_bs());
+  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
   heap_region_iterate(&rs_clear);
 }

 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
@@ -1775,19 +1770,25 @@
   // Tell the hot card cache about the update
   _cg1r->hot_card_cache()->resize_card_counts(capacity());
 }

 bool G1CollectedHeap::expand(size_t expand_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
   ergo_verbose2(ErgoHeapSizing,
                 "expand the heap",
                 ergo_format_byte("requested expansion amount")
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
+
+  if (_g1_storage.uncommitted_size() == 0) {
+    ergo_verbose0(ErgoHeapSizing,
+                  "did not expand the heap",
+                  ergo_format_reason("heap already fully expanded"));
+    return false;
+  }

   // First commit the memory.
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
   if (successful) {
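
The alignment at the top of expand() rounds the request up twice, first to the OS page size and then to the G1 region size, and the new early-out simply refuses to expand once nothing is left uncommitted. A worked example with illustrative numbers (assuming 4 KB pages and 1 MB regions):

  // expand_bytes = 3,000,000
  // page_align_size_up(3,000,000)  -> 3,002,368  (733 * 4,096)
  // align_size_up(3,002,368, 1 MB) -> 3,145,728  (3 * 1,048,576, i.e. 3 regions)
  size_t aligned = align_size_up(ReservedSpace::page_align_size_up(3000000),
                                 HeapRegion::GrainBytes);
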
@@ -1843,11 +1844,10 @@
   }
   return successful;
 }

 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
@@ -2043,24 +2043,17 @@
   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);

   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
-  if (barrier_set()->is_a(BarrierSet::ModRef)) {
-    _mr_bs = (ModRefBarrierSet*)_barrier_set;
-  } else {
-    vm_exit_during_initialization("G1 requires a mod ref bs.");
+  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
+    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
     return JNI_ENOMEM;
   }

   // Also create a G1 rem set.
-  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
-  } else {
-    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-    return JNI_ENOMEM;
-  }
+  _g1_rem_set = new G1RemSet(this, g1_barrier_set());

   // Carve out the G1 part of the heap.

   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
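
The two-step barrier-set check in initialize() (first ModRef, then CardTableModRef) collapses into one test against the concrete G1 kind, which is also what lets the _mr_bs field and its casts disappear. The single check is enough because is_a() is answered along the barrier-set hierarchy; a sketch of the assumed shape (recalled, not shown in this change):

  // Sketch only: each subclass claims its own name and defers to its parent, so the
  // concrete G1 logging barrier set answers true for G1SATBCTLogging as well as for
  // CardTableModRef and ModRef.
  class G1SATBCardTableLoggingModRefBS : public G1SATBCardTableModRefBS {
  public:
    virtual bool is_a(BarrierSet::Name bsn) {
      return bsn == BarrierSet::G1SATBCTLogging || G1SATBCardTableModRefBS::is_a(bsn);
    }
  };
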
@@ -3679,19 +3672,24 @@
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
   ensure_parsability(true);
+
+  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
+      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
+  }
 }

 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {

   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
       // we are at the end of the GC. Total collections has already been increased.
       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info();
+    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
   }

   // FIXME: what is this about?
   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
   // is set.
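
The prologue tests total_collections() directly while the epilogue tests total_collections() - 1, because the collection counter has already been incremented by the time gc_epilogue() runs, so both ends gate the summary on the same collection. A hypothetical helper (not in the patch) that makes the arithmetic explicit:

  // Illustration only; the real code checks the flags inline as shown above.
  static bool should_print_rs_summary(uint total_collections, uintx period, bool at_epilogue) {
    if (period == 0) return false;
    uint n = at_epilogue ? total_collections - 1 : total_collections;
    return (n % period) == 0;
  }
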
@@ -4548,11 +4546,11 @@

 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+    _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
@@ -5977,15 +5975,15 @@
     _humongous_set.update_from_proxy(humongous_proxy_set);
   }
 }

 class G1ParCleanupCTTask : public AbstractGangTask {
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
-  G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
+  G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }

   void work(uint worker_id) {
@@ -6004,13 +6002,13 @@
 };

 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
     : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
     if (r->is_survivor()) {
       _g1h->verify_dirty_region(r);
     } else {
@@ -6020,11 +6018,11 @@
   }
 };

 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->end());
   ct_bs->verify_not_dirty_region(mr);
 }

 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
@@ -6033,17 +6031,17 @@
   // retires each region and replaces it with a new one will do a
   // maximal allocation to fill in [pre_dummy_top(),end()] but will
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
   ct_bs->verify_dirty_region(mr);
 }

 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
     verify_dirty_region(hr);
   }
 }

@@ -6051,11 +6049,11 @@
   verify_dirty_young_list(_young_list->first_region());
 }
 #endif

 void G1CollectedHeap::cleanUpCardTable() {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   double start = os::elapsedTime();

   {
     // Iterate over the dirty cards region list.
     G1ParCleanupCTTask cleanup_task(ct_bs, this);
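
With cleanUpCardTable() working against the concrete G1SATBCardTableModRefBS type end to end, G1ParCleanupCTTask no longer needs casts when it clears cards. For illustration only (this closure is not in the patch, and it assumes CardTableModRefBS::clear(MemRegion) is available), the per-region work the task distributes amounts to clearing the card range that covers each dirty region:

  // Hypothetical sketch built from APIs visible elsewhere in these hunks
  // (HeapRegionClosure::doHeapRegion) plus the assumed card-table clear().
  class ClearCardsInRegionClosure : public HeapRegionClosure {
    G1SATBCardTableModRefBS* _ct_bs;
  public:
    ClearCardsInRegionClosure(G1SATBCardTableModRefBS* ct_bs) : _ct_bs(ct_bs) { }
    bool doHeapRegion(HeapRegion* r) {
      _ct_bs->clear(MemRegion(r->bottom(), r->end()));
      return false;  // false == keep iterating over regions
    }
  };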