8008508: CMS does not correctly reduce heap size after a Full GC
Reviewed-by: johnc, ysr
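
For review context, a condensed standalone sketch of the sizing policy that this change hoists out of TenuredGeneration::compute_new_size() into CardGeneration::compute_new_size(), so that CMS can reuse it after a compacting full GC. The sketch is illustrative only: the SizingInputs struct and desired_resize() helper are invented here, and the max_uintx clamping and the step that takes back expansion done for promotions are omitted; the real logic and field names are in the memory/generation.cpp hunk below.

    // Simplified model of the expand/shrink decision (sketch, not part of the patch).
    #include <algorithm>
    #include <cstddef>

    struct SizingInputs {
      size_t used_after_gc;         // live bytes after the full GC
      size_t capacity_after_gc;     // committed bytes after the full GC
      size_t init_size;             // spec()->init_size(): never shrink below this
      size_t min_heap_delta_bytes;  // ignore resizes smaller than this
      unsigned min_free_ratio;      // MinHeapFreeRatio, percent
      unsigned max_free_ratio;      // MaxHeapFreeRatio, percent
    };

    // Requested change in committed bytes: > 0 expand, < 0 shrink, 0 do nothing.
    // shrink_factor stands in for the new CardGeneration::_shrink_factor field.
    long long desired_resize(const SizingInputs& in, size_t& shrink_factor) {
      const size_t current_shrink_factor = shrink_factor;
      shrink_factor = 0;  // reset; re-armed below only while we keep shrinking

      const double maximum_used_pct = 1.0 - in.min_free_ratio / 100.0;
      const size_t minimum_desired =
          std::max<size_t>((size_t)(in.used_after_gc / maximum_used_pct), in.init_size);

      if (in.capacity_after_gc < minimum_desired) {
        const size_t expand_bytes = minimum_desired - in.capacity_after_gc;
        return (expand_bytes >= in.min_heap_delta_bytes) ? (long long)expand_bytes : 0;
      }

      size_t shrink_bytes = 0;
      if (in.max_free_ratio < 100) {
        const double minimum_used_pct = 1.0 - in.max_free_ratio / 100.0;
        const size_t maximum_desired =
            std::max<size_t>((size_t)(in.used_after_gc / minimum_used_pct), in.init_size);
        if (in.capacity_after_gc > maximum_desired) {
          // Damped shrinking: 0% of the excess on the first qualifying GC,
          // then 10%, 40% and 100% on the following ones.
          shrink_bytes =
              (in.capacity_after_gc - maximum_desired) / 100 * current_shrink_factor;
          shrink_factor = (current_shrink_factor == 0)
              ? 10 : std::min<size_t>(current_shrink_factor * 4, 100);
        }
      }
      return (shrink_bytes >= in.min_heap_delta_bytes) ? -(long long)shrink_bytes : 0;
    }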
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Feb 11 10:31:56 2013 -0800
@@ -48,6 +48,7 @@
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
+#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
@@ -916,7 +917,31 @@
return;
}
- size_t expand_bytes = 0;
+ // Compute some numbers about the state of the heap.
+ const size_t used_after_gc = used();
+ const size_t capacity_after_gc = capacity();
+
+ CardGeneration::compute_new_size();
+
+ // Reset again after a possible resizing
+ cmsSpace()->reset_after_compaction();
+
+ assert(used() == used_after_gc && used_after_gc <= capacity(),
+ err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
+ " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
+}
+
+void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
+ assert_locked_or_safepoint(Heap_lock);
+
+ // If incremental collection failed, we just want to expand
+ // to the limit.
+ if (incremental_collection_failed()) {
+ clear_incremental_collection_failed();
+ grow_to_reserved();
+ return;
+ }
+
double free_percentage = ((double) free()) / capacity();
double desired_free_percentage = (double) MinHeapFreeRatio / 100;
double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
@@ -925,9 +950,7 @@
if (free_percentage < desired_free_percentage) {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
assert(desired_capacity >= capacity(), "invalid expansion size");
- expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
- }
- if (expand_bytes > 0) {
+ size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
if (PrintGCDetails && Verbose) {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
@@ -961,6 +984,14 @@
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
}
+ } else {
+ size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
+    assert(desired_capacity <= capacity(), "invalid shrink size");
+ size_t shrink_bytes = capacity() - desired_capacity;
+ // Don't shrink unless the delta is greater than the minimum shrink we want
+ if (shrink_bytes >= MinHeapDeltaBytes) {
+ shrink_free_list_by(shrink_bytes);
+ }
}
}
@@ -1872,7 +1903,7 @@
assert_locked_or_safepoint(Heap_lock);
FreelistLocker z(this);
MetaspaceGC::compute_new_size();
- _cmsGen->compute_new_size();
+ _cmsGen->compute_new_size_free_list();
}
// A work method used by foreground collection to determine
@@ -2601,6 +2632,10 @@
}
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
+
+ _capacity_at_prologue = capacity();
+ _used_at_prologue = used();
+
// Delegate to CMScollector which knows how to coordinate between
// this and any other CMS generations that it is responsible for
// collecting.
@@ -3300,6 +3335,26 @@
}
+void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
+ assert_locked_or_safepoint(ExpandHeap_lock);
+ // Shrink committed space
+ _virtual_space.shrink_by(bytes);
+ // Shrink space; this also shrinks the space's BOT
+ _cmsSpace->set_end((HeapWord*) _virtual_space.high());
+ size_t new_word_size = heap_word_size(_cmsSpace->capacity());
+ // Shrink the shared block offset array
+ _bts->resize(new_word_size);
+ MemRegion mr(_cmsSpace->bottom(), new_word_size);
+ // Shrink the card table
+ Universe::heap()->barrier_set()->resize_covered_region(mr);
+
+ if (Verbose && PrintGC) {
+ size_t new_mem_size = _virtual_space.committed_size();
+ size_t old_mem_size = new_mem_size + bytes;
+ gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+ name(), old_mem_size/K, new_mem_size/K);
+ }
+}
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
@@ -3351,7 +3406,7 @@
return success;
}
-void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
+void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
// XXX Fix when compaction is implemented.
@@ -9074,51 +9129,6 @@
}
}
-// The desired expansion delta is computed so that:
-// . desired free percentage or greater is used
-void ASConcurrentMarkSweepGeneration::compute_new_size() {
- assert_locked_or_safepoint(Heap_lock);
-
- GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-
- // If incremental collection failed, we just want to expand
- // to the limit.
- if (incremental_collection_failed()) {
- clear_incremental_collection_failed();
- grow_to_reserved();
- return;
- }
-
- assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
-
- assert(gch->kind() == CollectedHeap::GenCollectedHeap,
- "Wrong type of heap");
- int prev_level = level() - 1;
- assert(prev_level >= 0, "The cms generation is the lowest generation");
- Generation* prev_gen = gch->get_gen(prev_level);
- assert(prev_gen->kind() == Generation::ASParNew,
- "Wrong type of young generation");
- ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
- size_t cur_eden = younger_gen->eden()->capacity();
- CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
- size_t cur_promo = free();
- size_policy->compute_tenured_generation_free_space(cur_promo,
- max_available(),
- cur_eden);
- resize(cur_promo, size_policy->promo_size());
-
- // Record the new size of the space in the cms generation
- // that is available for promotions. This is temporary.
- // It should be the desired promo size.
- size_policy->avg_cms_promo()->sample(free());
- size_policy->avg_old_live()->sample(used());
-
- if (UsePerfData) {
- CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
- counters->update_cms_capacity_counter(capacity());
- }
-}
-
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
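
Note on the renamed path above: compute_new_size_free_list() now shrinks the CMS generation as well as expanding it, using the same desired-capacity formula in both directions. A tiny worked example of that arithmetic (the program and all numbers are invented for illustration, not part of the patch):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t used     = 200 * M;        // live data after the CMS cycle
      const size_t capacity = 800 * M;        // committed old-gen capacity
      const double desired_free = 40 / 100.0; // e.g. MinHeapFreeRatio = 40

      const double free_percentage = (double)(capacity - used) / capacity;   // 0.75
      // Capacity that would leave exactly desired_free of the generation free.
      const size_t desired_capacity = (size_t)(used / (1.0 - desired_free)); // ~333M

      if (free_percentage < desired_free) {
        printf("expand by %zuM\n", (desired_capacity - capacity) / M);
      } else {
        // New with this fix: excess free space is handed back (when the delta
        // is at least MinHeapDeltaBytes) via shrink_free_list_by().
        printf("shrink_free_list_by(%zuM)\n", (capacity - desired_capacity) / M); // ~466M
      }
      return 0;
    }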
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Mon Feb 11 10:31:56 2013 -0800
@@ -60,6 +60,7 @@
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
+class TenuredGeneration;
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
@@ -810,9 +811,6 @@
// used regions of each generation to limit the extent of sweep
void save_sweep_limits();
- // Resize the generations included in the collector.
- void compute_new_size();
-
// A work method used by foreground collection to determine
// what type of collection (compacting or not, continuing or fresh)
// it should do.
@@ -909,6 +907,9 @@
void releaseFreelistLocks() const;
bool haveFreelistLocks() const;
+ // Adjust size of underlying generation
+ void compute_new_size();
+
// GC prologue and epilogue
void gc_prologue(bool full);
void gc_epilogue(bool full);
@@ -1082,7 +1083,7 @@
protected:
// Shrink generation by specified size (returns false if unable to shrink)
- virtual void shrink_by(size_t bytes);
+ void shrink_free_list_by(size_t bytes);
// Update statistics for GC
virtual void update_gc_stats(int level, bool full);
@@ -1233,6 +1234,7 @@
CMSExpansionCause::Cause cause);
virtual bool expand(size_t bytes, size_t expand_bytes);
void shrink(size_t bytes);
+ void shrink_by(size_t bytes);
HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
bool expand_and_ensure_spooling_space(PromotionInfo* promo);
@@ -1293,7 +1295,13 @@
bool must_be_youngest() const { return false; }
bool must_be_oldest() const { return true; }
- void compute_new_size();
+ // Resize the generation after a compacting GC. The
+ // generation can be treated as a contiguous space
+ // after the compaction.
+ virtual void compute_new_size();
+ // Resize the generation after a non-compacting
+ // collection.
+ void compute_new_size_free_list();
CollectionTypes debug_collection_type() { return _debug_collection_type; }
void rotate_debug_collection_type();
@@ -1315,7 +1323,6 @@
virtual void shrink_by(size_t bytes);
public:
- virtual void compute_new_size();
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
--- a/hotspot/src/share/vm/memory/generation.cpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/memory/generation.cpp Mon Feb 11 10:31:56 2013 -0800
@@ -382,7 +382,9 @@
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
int level,
GenRemSet* remset) :
- Generation(rs, initial_byte_size, level), _rs(remset)
+ Generation(rs, initial_byte_size, level), _rs(remset),
+ _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
+ _used_at_prologue()
{
HeapWord* start = (HeapWord*)rs.base();
size_t reserved_byte_size = rs.size();
@@ -406,6 +408,9 @@
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
}
+ _min_heap_delta_bytes = MinHeapDeltaBytes;
+ _capacity_at_prologue = initial_byte_size;
+ _used_at_prologue = 0;
}
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
@@ -457,6 +462,160 @@
}
+void CardGeneration::compute_new_size() {
+ assert(_shrink_factor <= 100, "invalid shrink factor");
+ size_t current_shrink_factor = _shrink_factor;
+ _shrink_factor = 0;
+
+ // We don't have floating point command-line arguments
+ // Note: argument processing ensures that MinHeapFreeRatio < 100.
+ const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
+ const double maximum_used_percentage = 1.0 - minimum_free_percentage;
+
+ // Compute some numbers about the state of the heap.
+ const size_t used_after_gc = used();
+ const size_t capacity_after_gc = capacity();
+
+ const double min_tmp = used_after_gc / maximum_used_percentage;
+ size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
+ // Don't shrink less than the initial generation size
+ minimum_desired_capacity = MAX2(minimum_desired_capacity,
+ spec()->init_size());
+ assert(used_after_gc <= minimum_desired_capacity, "sanity check");
+
+ if (PrintGC && Verbose) {
+ const size_t free_after_gc = free();
+ const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
+ gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
+ gclog_or_tty->print_cr(" "
+ " minimum_free_percentage: %6.2f"
+ " maximum_used_percentage: %6.2f",
+ minimum_free_percentage,
+ maximum_used_percentage);
+ gclog_or_tty->print_cr(" "
+ " free_after_gc : %6.1fK"
+ " used_after_gc : %6.1fK"
+ " capacity_after_gc : %6.1fK",
+ free_after_gc / (double) K,
+ used_after_gc / (double) K,
+ capacity_after_gc / (double) K);
+ gclog_or_tty->print_cr(" "
+ " free_percentage: %6.2f",
+ free_percentage);
+ }
+
+ if (capacity_after_gc < minimum_desired_capacity) {
+ // If we have less free space than we want then expand
+ size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
+ // Don't expand unless it's significant
+ if (expand_bytes >= _min_heap_delta_bytes) {
+ expand(expand_bytes, 0); // safe if expansion fails
+ }
+ if (PrintGC && Verbose) {
+ gclog_or_tty->print_cr(" expanding:"
+ " minimum_desired_capacity: %6.1fK"
+ " expand_bytes: %6.1fK"
+ " _min_heap_delta_bytes: %6.1fK",
+ minimum_desired_capacity / (double) K,
+ expand_bytes / (double) K,
+ _min_heap_delta_bytes / (double) K);
+ }
+ return;
+ }
+
+ // No expansion, now see if we want to shrink
+ size_t shrink_bytes = 0;
+ // We would never want to shrink more than this
+ size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
+
+ if (MaxHeapFreeRatio < 100) {
+ const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
+ const double minimum_used_percentage = 1.0 - maximum_free_percentage;
+ const double max_tmp = used_after_gc / minimum_used_percentage;
+ size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
+ maximum_desired_capacity = MAX2(maximum_desired_capacity,
+ spec()->init_size());
+ if (PrintGC && Verbose) {
+ gclog_or_tty->print_cr(" "
+ " maximum_free_percentage: %6.2f"
+ " minimum_used_percentage: %6.2f",
+ maximum_free_percentage,
+ minimum_used_percentage);
+ gclog_or_tty->print_cr(" "
+ " _capacity_at_prologue: %6.1fK"
+ " minimum_desired_capacity: %6.1fK"
+ " maximum_desired_capacity: %6.1fK",
+ _capacity_at_prologue / (double) K,
+ minimum_desired_capacity / (double) K,
+ maximum_desired_capacity / (double) K);
+ }
+ assert(minimum_desired_capacity <= maximum_desired_capacity,
+ "sanity check");
+
+ if (capacity_after_gc > maximum_desired_capacity) {
+ // Capacity too large, compute shrinking size
+ shrink_bytes = capacity_after_gc - maximum_desired_capacity;
+      // We don't want to shrink all the way back to initSize if people call
+ // System.gc(), because some programs do that between "phases" and then
+ // we'd just have to grow the heap up again for the next phase. So we
+ // damp the shrinking: 0% on the first call, 10% on the second call, 40%
+ // on the third call, and 100% by the fourth call. But if we recompute
+ // size without shrinking, it goes back to 0%.
+ shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+ assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
+ if (current_shrink_factor == 0) {
+ _shrink_factor = 10;
+ } else {
+ _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
+ }
+ if (PrintGC && Verbose) {
+ gclog_or_tty->print_cr(" "
+ " shrinking:"
+ " initSize: %.1fK"
+ " maximum_desired_capacity: %.1fK",
+ spec()->init_size() / (double) K,
+ maximum_desired_capacity / (double) K);
+ gclog_or_tty->print_cr(" "
+ " shrink_bytes: %.1fK"
+                    " current_shrink_factor: " SIZE_FORMAT
+                    " new shrink factor: " SIZE_FORMAT
+ " _min_heap_delta_bytes: %.1fK",
+ shrink_bytes / (double) K,
+ current_shrink_factor,
+ _shrink_factor,
+ _min_heap_delta_bytes / (double) K);
+ }
+ }
+ }
+
+ if (capacity_after_gc > _capacity_at_prologue) {
+ // We might have expanded for promotions, in which case we might want to
+ // take back that expansion if there's room after GC. That keeps us from
+ // stretching the heap with promotions when there's plenty of room.
+ size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
+ expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
+ // We have two shrinking computations, take the largest
+ shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
+ assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
+ if (PrintGC && Verbose) {
+ gclog_or_tty->print_cr(" "
+ " aggressive shrinking:"
+ " _capacity_at_prologue: %.1fK"
+ " capacity_after_gc: %.1fK"
+ " expansion_for_promotion: %.1fK"
+ " shrink_bytes: %.1fK",
+ capacity_after_gc / (double) K,
+ _capacity_at_prologue / (double) K,
+ expansion_for_promotion / (double) K,
+ shrink_bytes / (double) K);
+ }
+ }
+ // Don't shrink unless it's significant
+ if (shrink_bytes >= _min_heap_delta_bytes) {
+ shrink(shrink_bytes);
+ }
+}
+
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
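
The shrink damping added above ("0% on the first call, 10% on the second call, 40% on the third call, and 100% by the fourth call") falls out of the _shrink_factor update rule. A standalone illustration, not part of the patch, that just re-derives those percentages:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t excess = 400 * M;   // capacity above maximum_desired_capacity
      size_t shrink_factor = 0;        // models CardGeneration::_shrink_factor

      for (int gc = 1; gc <= 4; gc++) {
        const size_t current = shrink_factor;                // same dance as the real code
        const size_t shrink_bytes = excess / 100 * current;  // 0%, 10%, 40%, 100%
        shrink_factor = (current == 0) ? 10 : std::min<size_t>(current * 4, 100);
        printf("full GC #%d: shrink by %zuM\n", gc, shrink_bytes / M);
      }
      return 0;
    }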
--- a/hotspot/src/share/vm/memory/generation.hpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/memory/generation.hpp Mon Feb 11 10:31:56 2013 -0800
@@ -634,6 +634,17 @@
// This is local to this generation.
BlockOffsetSharedArray* _bts;
+ // current shrinking effect: this damps shrinking when the heap gets empty.
+ size_t _shrink_factor;
+
+ size_t _min_heap_delta_bytes; // Minimum amount to expand.
+
+ // Some statistics from before gc started.
+ // These are gathered in the gc_prologue (and should_collect)
+ // to control growing/shrinking policy in spite of promotions.
+ size_t _capacity_at_prologue;
+ size_t _used_at_prologue;
+
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
GenRemSet* remset);
@@ -644,6 +655,11 @@
// necessarily the full "bytes") was done.
virtual bool expand(size_t bytes, size_t expand_bytes);
+  // Shrink generation by the specified number of bytes
+ virtual void shrink(size_t bytes) = 0;
+
+ virtual void compute_new_size();
+
virtual void clear_remembered_set();
virtual void invalidate_remembered_set();
@@ -667,7 +683,6 @@
friend class VM_PopulateDumpSharedSpace;
protected:
- size_t _min_heap_delta_bytes; // Minimum amount to expand.
ContiguousSpace* _the_space; // actual space holding objects
WaterMark _last_gc; // watermark between objects allocated before
// and after last GC.
@@ -688,11 +703,10 @@
public:
OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
- size_t min_heap_delta_bytes,
int level, GenRemSet* remset,
ContiguousSpace* space) :
CardGeneration(rs, initial_byte_size, level, remset),
- _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
+ _the_space(space)
{}
inline bool is_in(const void* p) const;
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Mon Feb 11 10:31:56 2013 -0800
@@ -39,7 +39,7 @@
size_t initial_byte_size, int level,
GenRemSet* remset) :
OneContigSpaceCardGeneration(rs, initial_byte_size,
- MinHeapDeltaBytes, level, remset, NULL)
+ level, remset, NULL)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
@@ -86,162 +86,6 @@
return "tenured generation";
}
-void TenuredGeneration::compute_new_size() {
- assert(_shrink_factor <= 100, "invalid shrink factor");
- size_t current_shrink_factor = _shrink_factor;
- _shrink_factor = 0;
-
- // We don't have floating point command-line arguments
- // Note: argument processing ensures that MinHeapFreeRatio < 100.
- const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
- const double maximum_used_percentage = 1.0 - minimum_free_percentage;
-
- // Compute some numbers about the state of the heap.
- const size_t used_after_gc = used();
- const size_t capacity_after_gc = capacity();
-
- const double min_tmp = used_after_gc / maximum_used_percentage;
- size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
- // Don't shrink less than the initial generation size
- minimum_desired_capacity = MAX2(minimum_desired_capacity,
- spec()->init_size());
- assert(used_after_gc <= minimum_desired_capacity, "sanity check");
-
- if (PrintGC && Verbose) {
- const size_t free_after_gc = free();
- const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
- gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
- gclog_or_tty->print_cr(" "
- " minimum_free_percentage: %6.2f"
- " maximum_used_percentage: %6.2f",
- minimum_free_percentage,
- maximum_used_percentage);
- gclog_or_tty->print_cr(" "
- " free_after_gc : %6.1fK"
- " used_after_gc : %6.1fK"
- " capacity_after_gc : %6.1fK",
- free_after_gc / (double) K,
- used_after_gc / (double) K,
- capacity_after_gc / (double) K);
- gclog_or_tty->print_cr(" "
- " free_percentage: %6.2f",
- free_percentage);
- }
-
- if (capacity_after_gc < minimum_desired_capacity) {
- // If we have less free space than we want then expand
- size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
- // Don't expand unless it's significant
- if (expand_bytes >= _min_heap_delta_bytes) {
- expand(expand_bytes, 0); // safe if expansion fails
- }
- if (PrintGC && Verbose) {
- gclog_or_tty->print_cr(" expanding:"
- " minimum_desired_capacity: %6.1fK"
- " expand_bytes: %6.1fK"
- " _min_heap_delta_bytes: %6.1fK",
- minimum_desired_capacity / (double) K,
- expand_bytes / (double) K,
- _min_heap_delta_bytes / (double) K);
- }
- return;
- }
-
- // No expansion, now see if we want to shrink
- size_t shrink_bytes = 0;
- // We would never want to shrink more than this
- size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
-
- if (MaxHeapFreeRatio < 100) {
- const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
- const double minimum_used_percentage = 1.0 - maximum_free_percentage;
- const double max_tmp = used_after_gc / minimum_used_percentage;
- size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
- maximum_desired_capacity = MAX2(maximum_desired_capacity,
- spec()->init_size());
- if (PrintGC && Verbose) {
- gclog_or_tty->print_cr(" "
- " maximum_free_percentage: %6.2f"
- " minimum_used_percentage: %6.2f",
- maximum_free_percentage,
- minimum_used_percentage);
- gclog_or_tty->print_cr(" "
- " _capacity_at_prologue: %6.1fK"
- " minimum_desired_capacity: %6.1fK"
- " maximum_desired_capacity: %6.1fK",
- _capacity_at_prologue / (double) K,
- minimum_desired_capacity / (double) K,
- maximum_desired_capacity / (double) K);
- }
- assert(minimum_desired_capacity <= maximum_desired_capacity,
- "sanity check");
-
- if (capacity_after_gc > maximum_desired_capacity) {
- // Capacity too large, compute shrinking size
- shrink_bytes = capacity_after_gc - maximum_desired_capacity;
- // We don't want shrink all the way back to initSize if people call
- // System.gc(), because some programs do that between "phases" and then
- // we'd just have to grow the heap up again for the next phase. So we
- // damp the shrinking: 0% on the first call, 10% on the second call, 40%
- // on the third call, and 100% by the fourth call. But if we recompute
- // size without shrinking, it goes back to 0%.
- shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
- assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
- if (current_shrink_factor == 0) {
- _shrink_factor = 10;
- } else {
- _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
- }
- if (PrintGC && Verbose) {
- gclog_or_tty->print_cr(" "
- " shrinking:"
- " initSize: %.1fK"
- " maximum_desired_capacity: %.1fK",
- spec()->init_size() / (double) K,
- maximum_desired_capacity / (double) K);
- gclog_or_tty->print_cr(" "
- " shrink_bytes: %.1fK"
- " current_shrink_factor: %d"
- " new shrink factor: %d"
- " _min_heap_delta_bytes: %.1fK",
- shrink_bytes / (double) K,
- current_shrink_factor,
- _shrink_factor,
- _min_heap_delta_bytes / (double) K);
- }
- }
- }
-
- if (capacity_after_gc > _capacity_at_prologue) {
- // We might have expanded for promotions, in which case we might want to
- // take back that expansion if there's room after GC. That keeps us from
- // stretching the heap with promotions when there's plenty of room.
- size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
- expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
- // We have two shrinking computations, take the largest
- shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
- assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
- if (PrintGC && Verbose) {
- gclog_or_tty->print_cr(" "
- " aggressive shrinking:"
- " _capacity_at_prologue: %.1fK"
- " capacity_after_gc: %.1fK"
- " expansion_for_promotion: %.1fK"
- " shrink_bytes: %.1fK",
- capacity_after_gc / (double) K,
- _capacity_at_prologue / (double) K,
- expansion_for_promotion / (double) K,
- shrink_bytes / (double) K);
- }
- }
- // Don't shrink unless it's significant
- if (shrink_bytes >= _min_heap_delta_bytes) {
- shrink(shrink_bytes);
- }
- assert(used() == used_after_gc && used_after_gc <= capacity(),
- "sanity check");
-}
-
void TenuredGeneration::gc_prologue(bool full) {
_capacity_at_prologue = capacity();
_used_at_prologue = used();
@@ -312,6 +156,19 @@
size, is_tlab);
}
+void TenuredGeneration::compute_new_size() {
+ assert_locked_or_safepoint(Heap_lock);
+
+ // Compute some numbers about the state of the heap.
+ const size_t used_after_gc = used();
+ const size_t capacity_after_gc = capacity();
+
+ CardGeneration::compute_new_size();
+
+ assert(used() == used_after_gc && used_after_gc <= capacity(),
+ err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
+ " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
+}
void TenuredGeneration::update_gc_stats(int current_level,
bool full) {
// If the next lower level(s) has been collected, gather any statistics
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp Mon Feb 11 10:31:56 2013 -0800
@@ -38,13 +38,6 @@
class TenuredGeneration: public OneContigSpaceCardGeneration {
friend class VMStructs;
protected:
- // current shrinking effect: this damps shrinking when the heap gets empty.
- size_t _shrink_factor;
- // Some statistics from before gc started.
- // These are gathered in the gc_prologue (and should_collect)
- // to control growing/shrinking policy in spite of promotions.
- size_t _capacity_at_prologue;
- size_t _used_at_prologue;
#if INCLUDE_ALL_GCS
// To support parallel promotion: an array of parallel allocation
@@ -80,9 +73,6 @@
return !CollectGen0First;
}
- // Mark sweep support
- void compute_new_size();
-
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
bool should_collect(bool full,
@@ -93,6 +83,7 @@
bool clear_all_soft_refs,
size_t size,
bool is_tlab);
+ virtual void compute_new_size();
#if INCLUDE_ALL_GCS
// Overrides.
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Apr 05 10:20:04 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Feb 11 10:31:56 2013 -0800
@@ -478,6 +478,9 @@
\
nonstatic_field(CardGeneration, _rs, GenRemSet*) \
nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*) \
+ nonstatic_field(CardGeneration, _shrink_factor, size_t) \
+ nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
+ nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
\
nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
@@ -548,8 +551,6 @@
nonstatic_field(Space, _bottom, HeapWord*) \
nonstatic_field(Space, _end, HeapWord*) \
\
- nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
- nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \