  89     assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  90     assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  91
  92     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  93     size_t result = gen_size_limit() - virtual_space()->committed_size();
  94 -   size_t result_aligned = align_size_down(result, heap->generation_alignment());
  94 +   size_t result_aligned = align_down(result, heap->generation_alignment());
  95     return result_aligned;
  96   }
  97
  98   size_t ASPSOldGen::available_for_contraction() {
  99     size_t uncommitted_bytes = virtual_space()->uncommitted_size();

 104     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 105     const size_t gen_alignment = heap->generation_alignment();
 106     PSAdaptiveSizePolicy* policy = heap->size_policy();
 107     const size_t working_size =
 108       used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
 109 -   const size_t working_aligned = align_size_up(working_size, gen_alignment);
 109 +   const size_t working_aligned = align_up(working_size, gen_alignment);
 110     const size_t working_or_min = MAX2(working_aligned, min_gen_size());
 111     if (working_or_min > reserved().byte_size()) {
 112       // If the used or minimum gen size (aligned up) is greater
 113       // than the total reserved size, then the space available
 114       // for contraction should (after proper alignment) be 0

 122     // "decrement" fraction is conservative because its intent is to
 123     // only reduce the footprint.
 124
 125     size_t result = policy->promo_increment_aligned_down(max_contraction);
 126     // Also adjust for inter-generational alignment
 127 -   size_t result_aligned = align_size_down(result, gen_alignment);
 127 +   size_t result_aligned = align_down(result, gen_alignment);
 128
 129     Log(gc, ergo) log;
 130     if (log.is_trace()) {
 131       size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
 132       size_t promo_increment = policy->promo_increment(max_contraction);
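The only substantive change in this hunk is the rename of the alignment helpers: align_size_down becomes align_down and align_size_up becomes align_up, with the same arguments and the same rounding behavior. For context, the sketch below is a minimal standalone approximation of what these helpers compute for size_t values, assuming power-of-two alignments; the real HotSpot helpers are generic templates (in utilities/align.hpp in current sources) and this is not their actual implementation.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Standalone approximations of HotSpot's align_down/align_up for
    // power-of-two alignments. Hypothetical sketch, not the JDK code.
    static inline size_t align_down(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment");
      return size & ~(alignment - 1);               // round down to a multiple
    }

    static inline size_t align_up(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment");
      return (size + alignment - 1) & ~(alignment - 1);  // round up to a multiple
    }

    int main() {
      const size_t gen_alignment = 64 * 1024;       // assumed 64K, for illustration
      printf("%zu\n", align_down(200000, gen_alignment));  // 196608 (3 * 64K)
      printf("%zu\n", align_up(200000, gen_alignment));    // 262144 (4 * 64K)
      return 0;
    }

This matches how the patched code uses them: available expansion and contraction room is rounded down to a whole generation_alignment unit so the heap never grows or shrinks by a fraction of the alignment, while the working size is rounded up so the estimate errs on the side of keeping committed space.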