2775 return G1EagerReclaimHumongousObjectsWithStaleRefs ? |
2775 return G1EagerReclaimHumongousObjectsWithStaleRefs ? |
2776 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) : |
2776 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) : |
2777 G1EagerReclaimHumongousObjects && rem_set->is_empty(); |
2777 G1EagerReclaimHumongousObjects && rem_set->is_empty(); |
2778 } |
2778 } |
2779 |
2779 |
2780 class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure { |
|
2781 private: |
|
2782 size_t _total_humongous; |
|
2783 size_t _candidate_humongous; |
|
2784 |
|
2785 bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const { |
|
2786 assert(region->is_starts_humongous(), "Must start a humongous object"); |
|
2787 |
|
2788 oop obj = oop(region->bottom()); |
|
2789 |
|
2790 // Dead objects cannot be eager reclaim candidates. Due to class |
|
2791 // unloading it is unsafe to query their classes so we return early. |
|
2792 if (g1h->is_obj_dead(obj, region)) { |
|
2793 return false; |
|
2794 } |
|
2795 |
|
2796 // If we do not have a complete remembered set for the region, then we can |
|
2797 // not be sure that we have all references to it. |
|
2798 if (!region->rem_set()->is_complete()) { |
|
2799 return false; |
|
2800 } |
|
2801 // Candidate selection must satisfy the following constraints |
|
2802 // while concurrent marking is in progress: |
|
2803 // |
|
2804 // * In order to maintain SATB invariants, an object must not be |
|
2805 // reclaimed if it was allocated before the start of marking and |
|
2806 // has not had its references scanned. Such an object must have |
|
2807 // its references (including type metadata) scanned to ensure no |
|
2808 // live objects are missed by the marking process. Objects |
|
2809 // allocated after the start of concurrent marking don't need to |
|
2810 // be scanned. |
|
2811 // |
|
2812 // * An object must not be reclaimed if it is on the concurrent |
|
2813 // mark stack. Objects allocated after the start of concurrent |
|
2814 // marking are never pushed on the mark stack. |
|
2815 // |
|
2816 // Nominating only objects allocated after the start of concurrent |
|
2817 // marking is sufficient to meet both constraints. This may miss |
|
2818 // some objects that satisfy the constraints, but the marking data |
|
2819 // structures don't support efficiently performing the needed |
|
2820 // additional tests or scrubbing of the mark stack. |
|
2821 // |
|
2822 // However, we presently only nominate is_typeArray() objects. |
|
2823 // A humongous object containing references induces remembered |
|
2824 // set entries on other regions. In order to reclaim such an |
|
2825 // object, those remembered sets would need to be cleaned up. |
|
2826 // |
|
2827 // We also treat is_typeArray() objects specially, allowing them |
|
2828 // to be reclaimed even if allocated before the start of |
|
2829 // concurrent mark. For this we rely on mark stack insertion to |
|
2830 // exclude is_typeArray() objects, preventing reclaiming an object |
|
2831 // that is in the mark stack. We also rely on the metadata for |
|
2832 // such objects to be built-in and so ensured to be kept live. |
|
2833 // Frequent allocation and drop of large binary blobs is an |
|
2834 // important use case for eager reclaim, and this special handling |
|
2835 // may reduce needed headroom. |
|
2836 |
|
2837 return obj->is_typeArray() && |
|
2838 g1h->is_potential_eager_reclaim_candidate(region); |
|
2839 } |
|
2840 |
|
2841 public: |
|
2842 RegisterRegionsWithRegionAttrTableClosure() |
|
2843 : _total_humongous(0), |
|
2844 _candidate_humongous(0) { |
|
2845 } |
|
2846 |
|
2847 virtual bool do_heap_region(HeapRegion* r) { |
|
2848 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
|
2849 |
|
2850 if (!r->is_starts_humongous()) { |
|
2851 g1h->register_region_with_region_attr(r); |
|
2852 return false; |
|
2853 } |
|
2854 |
|
2855 bool is_candidate = humongous_region_is_candidate(g1h, r); |
|
2856 uint rindex = r->hrm_index(); |
|
2857 g1h->set_humongous_reclaim_candidate(rindex, is_candidate); |
|
2858 if (is_candidate) { |
|
2859 g1h->register_humongous_region_with_region_attr(rindex); |
|
2860 _candidate_humongous++; |
|
2861 // We will later handle the remembered sets of these regions. |
|
2862 } else { |
|
2863 g1h->register_region_with_region_attr(r); |
|
2864 } |
|
2865 _total_humongous++; |
|
2866 |
|
2867 return false; |
|
2868 } |
|
2869 |
|
2870 size_t total_humongous() const { return _total_humongous; } |
|
2871 size_t candidate_humongous() const { return _candidate_humongous; } |
|
2872 }; |
|
2873 |
|
2874 void G1CollectedHeap::register_regions_with_region_attr() { |
|
2875 Ticks start = Ticks::now(); |
|
2876 |
|
2877 RegisterRegionsWithRegionAttrTableClosure cl; |
|
2878 heap_region_iterate(&cl); |
|
2879 |
|
2880 phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0, |
|
2881 cl.total_humongous(), |
|
2882 cl.candidate_humongous()); |
|
2883 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0; |
|
2884 } |
|
2885 |
|
2886 #ifndef PRODUCT |
2780 #ifndef PRODUCT |
2887 void G1CollectedHeap::verify_region_attr_remset_update() { |
2781 void G1CollectedHeap::verify_region_attr_remset_update() { |
2888 class VerifyRegionAttrRemSet : public HeapRegionClosure { |
2782 class VerifyRegionAttrRemSet : public HeapRegionClosure { |
2889 public: |
2783 public: |
2890 virtual bool do_heap_region(HeapRegion* r) { |
2784 virtual bool do_heap_region(HeapRegion* r) { |
3697 Ticks start = Ticks::now(); |
3591 Ticks start = Ticks::now(); |
3698 per_thread_states->flush(); |
3592 per_thread_states->flush(); |
3699 phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds()); |
3593 phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds()); |
3700 } |
3594 } |
3701 |
3595 |
|
// Parallel task run before evacuation: each worker prepares a share of the
// regions for scanning and nominates humongous eager-reclaim candidates.
// Per-worker counts are accumulated into the task's volatile totals via
// Atomic::add when each worker's closure is destroyed, and the task's own
// destructor publishes whether any candidates were found.
class G1PrepareEvacuationTask : public AbstractGangTask {
  class G1PrepareRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1PrepareEvacuationTask* _parent_task;
    // Worker-local counters; flushed to _parent_task in the destructor so
    // no atomics are needed per region.
    size_t _worker_humongous_total;
    size_t _worker_humongous_candidates;

    // Decide whether the humongous object starting in "region" may be
    // nominated for eager reclaim at the end of this collection.
    bool humongous_region_is_candidate(HeapRegion* region) const {
      assert(region->is_starts_humongous(), "Must start a humongous object");

      oop obj = oop(region->bottom());

      // Dead objects cannot be eager reclaim candidates. Due to class
      // unloading it is unsafe to query their classes so we return early.
      if (_g1h->is_obj_dead(obj, region)) {
        return false;
      }

      // If we do not have a complete remembered set for the region, then we can
      // not be sure that we have all references to it.
      if (!region->rem_set()->is_complete()) {
        return false;
      }
      // Candidate selection must satisfy the following constraints
      // while concurrent marking is in progress:
      //
      // * In order to maintain SATB invariants, an object must not be
      // reclaimed if it was allocated before the start of marking and
      // has not had its references scanned.  Such an object must have
      // its references (including type metadata) scanned to ensure no
      // live objects are missed by the marking process.  Objects
      // allocated after the start of concurrent marking don't need to
      // be scanned.
      //
      // * An object must not be reclaimed if it is on the concurrent
      // mark stack.  Objects allocated after the start of concurrent
      // marking are never pushed on the mark stack.
      //
      // Nominating only objects allocated after the start of concurrent
      // marking is sufficient to meet both constraints.  This may miss
      // some objects that satisfy the constraints, but the marking data
      // structures don't support efficiently performing the needed
      // additional tests or scrubbing of the mark stack.
      //
      // However, we presently only nominate is_typeArray() objects.
      // A humongous object containing references induces remembered
      // set entries on other regions.  In order to reclaim such an
      // object, those remembered sets would need to be cleaned up.
      //
      // We also treat is_typeArray() objects specially, allowing them
      // to be reclaimed even if allocated before the start of
      // concurrent mark.  For this we rely on mark stack insertion to
      // exclude is_typeArray() objects, preventing reclaiming an object
      // that is in the mark stack.  We also rely on the metadata for
      // such objects to be built-in and so ensured to be kept live.
      // Frequent allocation and drop of large binary blobs is an
      // important use case for eager reclaim, and this special handling
      // may reduce needed headroom.

      return obj->is_typeArray() &&
             _g1h->is_potential_eager_reclaim_candidate(region);
    }

  public:
    G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
      _g1h(g1h),
      _parent_task(parent_task),
      _worker_humongous_total(0),
      _worker_humongous_candidates(0) { }

    // Flush this worker's counts into the shared task totals exactly once,
    // when the worker is done iterating its share of regions.
    ~G1PrepareRegionsClosure() {
      _parent_task->add_humongous_candidates(_worker_humongous_candidates);
      _parent_task->add_humongous_total(_worker_humongous_total);
    }

    virtual bool do_heap_region(HeapRegion* hr) {
      // First prepare the region for scanning
      _g1h->rem_set()->prepare_region_for_scan(hr);

      // Now check if region is a humongous candidate
      if (!hr->is_starts_humongous()) {
        _g1h->register_region_with_region_attr(hr);
        return false;
      }

      uint index = hr->hrm_index();
      if (humongous_region_is_candidate(hr)) {
        _g1h->set_humongous_reclaim_candidate(index, true);
        _g1h->register_humongous_region_with_region_attr(index);
        _worker_humongous_candidates++;
        // We will later handle the remembered sets of these regions.
      } else {
        _g1h->set_humongous_reclaim_candidate(index, false);
        _g1h->register_region_with_region_attr(hr);
      }
      _worker_humongous_total++;

      // Returning false continues the iteration over all claimed regions.
      return false;
    }
  };

  G1CollectedHeap* _g1h;
  HeapRegionClaimer _claimer;
  // Shared totals, updated concurrently by workers via Atomic::add.
  volatile size_t _humongous_total;
  volatile size_t _humongous_candidates;
public:
  G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
    AbstractGangTask("Prepare Evacuation"),
    _g1h(g1h),
    _claimer(_g1h->workers()->active_workers()),
    _humongous_total(0),
    _humongous_candidates(0) { }

  // All workers have finished by the time the task is destroyed, so the
  // accumulated candidate count is final here and can be published.
  ~G1PrepareEvacuationTask() {
    _g1h->set_has_humongous_reclaim_candidate(_humongous_candidates > 0);
  }

  // Per-worker entry point: iterate a claimed subset of the heap regions,
  // starting at an offset derived from worker_id to spread out contention.
  void work(uint worker_id) {
    G1PrepareRegionsClosure cl(_g1h, this);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
  }

  // Called from worker closures' destructors; must be thread-safe.
  void add_humongous_candidates(size_t candidates) {
    Atomic::add(&_humongous_candidates, candidates);
  }

  void add_humongous_total(size_t total) {
    Atomic::add(&_humongous_total, total);
  }

  // NOTE(review): plain volatile reads — only valid after all workers have
  // completed (i.e. after run_task returns); confirm callers respect this.
  size_t humongous_candidates() {
    return _humongous_candidates;
  }

  size_t humongous_total() {
    return _humongous_total;
  }
};
|
3734 |
3702 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) { |
3735 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) { |
3703 _bytes_used_during_gc = 0; |
3736 _bytes_used_during_gc = 0; |
3704 |
3737 |
3705 _expand_heap_after_alloc_failure = true; |
3738 _expand_heap_after_alloc_failure = true; |
3706 _evacuation_failed = false; |
3739 _evacuation_failed = false; |