8066782: Move common code from CMSGeneration and TenuredGeneration to CardGeneration
Reviewed-by: kbarrett, tschatzl
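
For orientation, the shape of the change: CardGeneration now owns the grow/shrink machinery and asks each concrete generation for its space and for the lock it expects to hold, in template-method style. TenuredGeneration answers with _the_space and ExpandHeap_lock, ConcurrentMarkSweepGeneration with _cmsSpace and Heap_lock (see the hunks below). The following is a minimal standalone sketch of that structure, not the HotSpot code itself; SketchSpace, CardGenerationSketch, OldGenSketch and the byte counts are stand-ins invented for the sketch.

#include <cassert>
#include <cstddef>
#include <cstdio>

class SketchSpace {                                   // stand-in for CompactibleSpace
 public:
  size_t capacity() const { return _capacity; }
  void grow(size_t bytes)   { _capacity += bytes; }
  void shrink(size_t bytes) { assert(bytes <= _capacity); _capacity -= bytes; }
 private:
  size_t _capacity = 0;
};

class CardGenerationSketch {                          // the shared sizing code lives here
 protected:
  virtual SketchSpace* space() const = 0;                  // hook: which space
  virtual void assert_correct_size_change_locking() = 0;   // hook: which lock to check
 public:
  virtual ~CardGenerationSketch() {}
  bool grow_by(size_t bytes) {                        // one copy instead of two
    assert_correct_size_change_locking();
    space()->grow(bytes);
    return true;
  }
  virtual void shrink(size_t bytes) {
    assert_correct_size_change_locking();
    space()->shrink(bytes);
  }
  size_t capacity() const { return space()->capacity(); }
};

class OldGenSketch : public CardGenerationSketch {    // plays TenuredGeneration's role
 public:
  OldGenSketch() : _the_space(new SketchSpace()) {}
  ~OldGenSketch() { delete _the_space; }
 protected:
  SketchSpace* space() const override { return _the_space; }
  void assert_correct_size_change_locking() override { /* would check ExpandHeap_lock */ }
 private:
  SketchSpace* _the_space;
};

int main() {
  OldGenSketch gen;
  gen.grow_by(64 * 1024);
  gen.shrink(16 * 1024);
  printf("capacity: %zu bytes\n", gen.capacity());    // prints 49152
  return 0;
}

Everything generation-specific is reduced to the two protected hooks; the rest of the sizing code becomes a single shared implementation in CardGeneration.
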
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Dec 10 11:32:22 2014 +0100
@@ -793,11 +793,6 @@
}
}
-CompactibleSpace*
-ConcurrentMarkSweepGeneration::first_compaction_space() const {
- return _cmsSpace;
-}
-
void ConcurrentMarkSweepGeneration::reset_after_compaction() {
// Clear the promotion information. These pointers can be adjusted
// along with all the other pointers into the heap but
@@ -808,10 +803,6 @@
}
}
-void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
- blk->do_space(_cmsSpace);
-}
-
void ConcurrentMarkSweepGeneration::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
@@ -882,7 +873,7 @@
expand_bytes);
}
// safe if expansion fails
- expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
+ expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
@@ -1048,8 +1039,7 @@
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
- expand(s*HeapWordSize, MinHeapDeltaBytes,
- CMSExpansionCause::_satisfy_promotion);
+ expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
// Since there's currently no next generation, we don't try to promote
// into a more senior generation.
assert(next_gen() == NULL, "assumption, based upon which no attempt "
@@ -2625,13 +2615,6 @@
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
void
-ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
- cl->set_generation(this);
- younger_refs_in_space_iterate(_cmsSpace, cl);
- cl->reset_generation();
-}
-
-void
ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
if (freelistLock()->owned_by_self()) {
Generation::oop_iterate(cl);
@@ -2803,23 +2786,17 @@
CMSSynchronousYieldRequest yr;
assert(!tlab, "Can't deal with TLAB allocation");
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
- expand(word_size*HeapWordSize, MinHeapDeltaBytes,
- CMSExpansionCause::_satisfy_allocation);
+ expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
}
return have_lock_and_allocate(word_size, tlab);
}
-// YSR: All of this generation expansion/shrinking stuff is an exact copy of
-// TenuredGeneration, which makes me wonder if we should move this
-// to CardGeneration and share it...
-bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
- return CardGeneration::expand(bytes, expand_bytes);
-}
-
-void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
- CMSExpansionCause::Cause cause)
+void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
+ size_t bytes,
+ size_t expand_bytes,
+ CMSExpansionCause::Cause cause)
{
bool success = expand(bytes, expand_bytes);
@@ -2848,8 +2825,7 @@
return NULL;
}
// Otherwise, we try expansion.
- expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
- CMSExpansionCause::_allocate_par_lab);
+ expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
// Now go around the loop and try alloc again;
// A competing par_promote might beat us to the expansion space,
// so we may go around the loop again if promotion fails again.
@@ -2876,8 +2852,7 @@
return false;
}
// Otherwise, we try expansion.
- expand(refill_size_bytes, MinHeapDeltaBytes,
- CMSExpansionCause::_allocate_par_spooling_space);
+ expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
// Now go around the loop and try alloc again;
// A competing allocation might beat us to the expansion space,
// so we may go around the loop again if allocation fails again.
@@ -2887,77 +2862,16 @@
}
}
-
-void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- // Shrink committed space
- _virtual_space.shrink_by(bytes);
- // Shrink space; this also shrinks the space's BOT
- _cmsSpace->set_end((HeapWord*) _virtual_space.high());
- size_t new_word_size = heap_word_size(_cmsSpace->capacity());
- // Shrink the shared block offset array
- _bts->resize(new_word_size);
- MemRegion mr(_cmsSpace->bottom(), new_word_size);
- // Shrink the card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size + bytes;
- gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, new_mem_size/K);
- }
-}
-
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
- assert_locked_or_safepoint(Heap_lock);
- size_t size = ReservedSpace::page_align_size_down(bytes);
// Only shrink if a compaction was done so that all the free space
// in the generation is in a contiguous block at the end.
- if (size > 0 && did_compact()) {
- shrink_by(size);
- }
-}
-
-bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
+ if (did_compact()) {
+ CardGeneration::shrink(bytes);
+ }
+}
+
+void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
assert_locked_or_safepoint(Heap_lock);
- bool result = _virtual_space.expand_by(bytes);
- if (result) {
- size_t new_word_size =
- heap_word_size(_virtual_space.committed_size());
- MemRegion mr(_cmsSpace->bottom(), new_word_size);
- _bts->resize(new_word_size); // resize the block offset shared array
- Universe::heap()->barrier_set()->resize_covered_region(mr);
- // Hmmmm... why doesn't CFLS::set_end verify locking?
- // This is quite ugly; FIX ME XXX
- _cmsSpace->assert_locked(freelistLock());
- _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
- // update the space and generation capacity counters
- if (UsePerfData) {
- _space_counters->update_capacity();
- _gen_counters->update_all();
- }
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size - bytes;
- gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, bytes/K, new_mem_size/K);
- }
- }
- return result;
-}
-
-bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
- assert_locked_or_safepoint(Heap_lock);
- bool success = true;
- const size_t remaining_bytes = _virtual_space.uncommitted_size();
- if (remaining_bytes > 0) {
- success = grow_by(remaining_bytes);
- DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
- }
- return success;
}
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
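
A note on the renaming in the hunks above: in C++, declaring the cause-taking expand overload inside ConcurrentMarkSweepGeneration hides the inherited two-argument CardGeneration::expand, which is presumably what the deleted one-line forwarder was compensating for. Giving the overload its own name, expand_for_gc_cause, avoids the hiding and makes the forwarder unnecessary. A tiny standalone illustration follows; Base, Hidden, Renamed and the int/const char* parameters are invented for the sketch.

#include <cstdio>

struct Base {
  bool expand(int bytes, int delta) {
    printf("Base::expand(%d, %d)\n", bytes, delta);
    return true;
  }
};

struct Hidden : Base {
  // Same name, different signature: this hides Base::expand(int, int), so
  // h.expand(1, 2) would not compile without "using Base::expand;" or a
  // forwarding overload -- roughly the old CMS arrangement.
  bool expand(int bytes, int delta, const char* cause) {
    printf("cause: %s\n", cause);
    return Base::expand(bytes, delta);
  }
};

struct Renamed : Base {
  // Distinct name: Base::expand stays visible and no forwarder is needed.
  bool expand_for_gc_cause(int bytes, int delta, const char* cause) {
    printf("cause: %s\n", cause);
    return expand(bytes, delta);
  }
};

int main() {
  Renamed r;
  r.expand(1, 2);                            // the inherited Base::expand
  r.expand_for_gc_cause(3, 4, "promotion");
  return 0;
}
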
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "memory/cardGeneration.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/iterator.hpp"
+#include "memory/space.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
@@ -171,9 +172,7 @@
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
- //
friend class CMSCollector; // To get at expansion stats further below.
- //
VirtualSpace _virtual_space; // Space for the stack
oop* _base; // Bottom of stack
@@ -1031,6 +1030,9 @@
void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
+ // Accessing spaces
+ CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }
+
private:
// For parallel young-gen GC support.
CMSParGCThreadState** _par_gc_thread_states;
@@ -1064,6 +1066,10 @@
double initiating_occupancy() const { return _initiating_occupancy; }
void init_initiating_occupancy(intx io, uintx tr);
+ void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
+
+ void assert_correct_size_change_locking();
+
public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
@@ -1100,23 +1106,14 @@
// Override
virtual void ref_processor_init();
- // Grow generation by specified size (returns false if unable to grow)
- bool grow_by(size_t bytes);
- // Grow generation to reserved size.
- bool grow_to_reserved();
-
void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
// Space enquiries
- size_t capacity() const;
- size_t used() const;
- size_t free() const;
double occupancy() const { return ((double)used())/((double)capacity()); }
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
// over-rides
- MemRegion used_region() const;
MemRegion used_region_at_save_marks() const;
// Does a "full" (forced) collection invoked on this generation collect
@@ -1127,10 +1124,6 @@
return !ScavengeBeforeFullGC;
}
- void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
- // Support for compaction
- CompactibleSpace* first_compaction_space() const;
// Adjust quantities in the generation affected by
// the compaction.
void reset_after_compaction();
@@ -1190,18 +1183,13 @@
}
// Allocation failure
- void expand(size_t bytes, size_t expand_bytes,
- CMSExpansionCause::Cause cause);
- virtual bool expand(size_t bytes, size_t expand_bytes);
void shrink(size_t bytes);
- void shrink_by(size_t bytes);
HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
bool expand_and_ensure_spooling_space(PromotionInfo* promo);
// Iteration support and related enquiries
void save_marks();
bool no_allocs_since_save_marks();
- void younger_refs_iterate(OopsInGenClosure* cl);
// Iteration support specific to CMS generations
void save_sweep_limit();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -369,22 +369,6 @@
cmsSpace()->save_sweep_limit();
}
-inline size_t ConcurrentMarkSweepGeneration::capacity() const {
- return _cmsSpace->capacity();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::used() const {
- return _cmsSpace->used();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::free() const {
- return _cmsSpace->free();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
- return _cmsSpace->used_region();
-}
-
inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
return _cmsSpace->used_region_at_save_marks();
}
--- a/hotspot/src/share/vm/memory/cardGeneration.cpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/memory/cardGeneration.cpp Wed Dec 10 11:32:22 2014 +0100
@@ -23,7 +23,9 @@
*/
#include "precompiled.hpp"
+
#include "memory/blockOffsetTable.inline.hpp"
+#include "memory/cardGeneration.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genOopClosures.inline.hpp"
@@ -49,8 +51,9 @@
heap_word_size(initial_byte_size));
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
_rs->resize_covered_region(committed_mr);
- if (_bts == NULL)
+ if (_bts == NULL) {
vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
+ }
// Verify that the start and end of this generation are each the start of a card.
// If this wasn't true, a single card could span more than one generation,
@@ -67,6 +70,43 @@
_used_at_prologue = 0;
}
+bool CardGeneration::grow_by(size_t bytes) {
+ assert_correct_size_change_locking();
+ bool result = _virtual_space.expand_by(bytes);
+ if (result) {
+ size_t new_word_size =
+ heap_word_size(_virtual_space.committed_size());
+ MemRegion mr(space()->bottom(), new_word_size);
+ // Expand card table
+ Universe::heap()->barrier_set()->resize_covered_region(mr);
+ // Expand shared block offset array
+ _bts->resize(new_word_size);
+
+ // Fix for bug #4668531
+ if (ZapUnusedHeapArea) {
+ MemRegion mangle_region(space()->end(),
+ (HeapWord*)_virtual_space.high());
+ SpaceMangler::mangle_region(mangle_region);
+ }
+
+ // Expand space -- also expands space's BOT
+ // (which uses (part of) shared array above)
+ space()->set_end((HeapWord*)_virtual_space.high());
+
+ // update the space and generation capacity counters
+ update_counters();
+
+ if (Verbose && PrintGC) {
+ size_t new_mem_size = _virtual_space.committed_size();
+ size_t old_mem_size = new_mem_size - bytes;
+ gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
+ SIZE_FORMAT "K to " SIZE_FORMAT "K",
+ name(), old_mem_size/K, bytes/K, new_mem_size/K);
+ }
+ }
+ return result;
+}
+
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
assert_locked_or_safepoint(Heap_lock);
if (bytes == 0) {
@@ -102,6 +142,44 @@
return success;
}
+bool CardGeneration::grow_to_reserved() {
+ assert_correct_size_change_locking();
+ bool success = true;
+ const size_t remaining_bytes = _virtual_space.uncommitted_size();
+ if (remaining_bytes > 0) {
+ success = grow_by(remaining_bytes);
+ DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+ }
+ return success;
+}
+
+void CardGeneration::shrink(size_t bytes) {
+ assert_correct_size_change_locking();
+
+ size_t size = ReservedSpace::page_align_size_down(bytes);
+ if (size == 0) {
+ return;
+ }
+
+ // Shrink committed space
+ _virtual_space.shrink_by(size);
+ // Shrink space; this also shrinks the space's BOT
+ space()->set_end((HeapWord*) _virtual_space.high());
+ size_t new_word_size = heap_word_size(space()->capacity());
+ // Shrink the shared block offset array
+ _bts->resize(new_word_size);
+ MemRegion mr(space()->bottom(), new_word_size);
+ // Shrink the card table
+ Universe::heap()->barrier_set()->resize_covered_region(mr);
+
+ if (Verbose && PrintGC) {
+ size_t new_mem_size = _virtual_space.committed_size();
+ size_t old_mem_size = new_mem_size + size;
+ gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+ name(), old_mem_size/K, new_mem_size/K);
+ }
+}
+
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
_rs->clear(reserved());
@@ -269,3 +347,14 @@
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
+
+void CardGeneration::space_iterate(SpaceClosure* blk,
+ bool usedOnly) {
+ blk->do_space(space());
+}
+
+void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+ blk->set_generation(this);
+ younger_refs_in_space_iterate(space(), blk);
+ blk->reset_generation();
+}
--- a/hotspot/src/share/vm/memory/cardGeneration.hpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/memory/cardGeneration.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -31,6 +31,7 @@
#include "memory/generation.hpp"
class BlockOffsetSharedArray;
+class CompactibleSpace;
class CardGeneration: public Generation {
friend class VMStructs;
@@ -40,7 +41,7 @@
// This is local to this generation.
BlockOffsetSharedArray* _bts;
- // current shrinking effect: this damps shrinking when the heap gets empty.
+ // Current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
size_t _min_heap_delta_bytes; // Minimum amount to expand.
@@ -54,6 +55,10 @@
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
GenRemSet* remset);
+ virtual void assert_correct_size_change_locking() = 0;
+
+ virtual CompactibleSpace* space() const = 0;
+
public:
// Attempt to expand the generation by "bytes". Expand by at a
@@ -62,7 +67,7 @@
virtual bool expand(size_t bytes, size_t expand_bytes);
// Shrink generation with specified size
- virtual void shrink(size_t bytes) = 0;
+ virtual void shrink(size_t bytes);
virtual void compute_new_size();
@@ -73,9 +78,22 @@
virtual void prepare_for_verify();
// Grow generation with specified size (returns false if unable to grow)
- virtual bool grow_by(size_t bytes) = 0;
+ bool grow_by(size_t bytes);
// Grow generation to reserved size.
- virtual bool grow_to_reserved() = 0;
+ bool grow_to_reserved();
+
+ size_t capacity() const;
+ size_t used() const;
+ size_t free() const;
+ MemRegion used_region() const;
+
+ void space_iterate(SpaceClosure* blk, bool usedOnly = false);
+
+ void younger_refs_iterate(OopsInGenClosure* blk);
+
+ bool is_in(const void* p) const;
+
+ CompactibleSpace* first_compaction_space() const;
};
#endif // SHARE_VM_MEMORY_CARDGENERATION_HPP
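
One detail of the header above: shrink() drops from pure virtual to a virtual with the shared body added in cardGeneration.cpp, so a subclass only overrides it to add a precondition, which is exactly what the CMS override does with its did_compact() guard before delegating back to CardGeneration::shrink. A small standalone sketch of that pattern; GenerationSketch, CmsSketch and set_did_compact are invented names.

#include <cstddef>
#include <cstdio>

class GenerationSketch {                       // stand-in for CardGeneration
 public:
  virtual ~GenerationSketch() {}
  virtual void shrink(std::size_t bytes) {     // shared default behaviour
    printf("shrinking by %zu bytes\n", bytes);
  }
};

class CmsSketch : public GenerationSketch {    // plays ConcurrentMarkSweepGeneration's role
 public:
  void set_did_compact(bool v) { _did_compact = v; }
  void shrink(std::size_t bytes) override {
    // Add a precondition, then reuse the base implementation -- the same
    // shape as the did_compact() guard in the CMS override of shrink().
    if (_did_compact) {
      GenerationSketch::shrink(bytes);
    }
  }
 private:
  bool _did_compact = false;
};

int main() {
  CmsSketch gen;
  gen.shrink(1024);                 // ignored: free space may not be contiguous yet
  gen.set_did_compact(true);
  gen.shrink(1024);                 // delegates to the shared implementation
  return 0;
}
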
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/cardGeneration.inline.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
+#define SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
+
+#include "memory/cardGeneration.hpp"
+#include "memory/space.hpp"
+
+inline size_t CardGeneration::capacity() const {
+ return space()->capacity();
+}
+
+inline size_t CardGeneration::used() const {
+ return space()->used();
+}
+
+inline size_t CardGeneration::free() const {
+ return space()->free();
+}
+
+inline MemRegion CardGeneration::used_region() const {
+ return space()->used_region();
+}
+
+inline bool CardGeneration::is_in(const void* p) const {
+ return space()->is_in(p);
+}
+
+inline CompactibleSpace* CardGeneration::first_compaction_space() const {
+ return space();
+}
+
+#endif // SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
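
On the new file itself: cardGeneration.hpp only forward-declares CompactibleSpace, so the accessors that dereference space() are defined here, where space.hpp has been included, and translation units that use them pull in cardGeneration.inline.hpp (as cardGeneration.cpp now does), presumably to keep the main header free of a space.hpp dependency. A minimal standalone sketch of that header split follows, with the would-be file boundaries marked by comments; WidgetSketch and HolderSketch are invented names.

#include <cstdio>

// --- role of space.hpp: the complete type the inline bodies dereference ---
struct WidgetSketch {
  int capacity() const { return 42; }
};

// --- role of cardGeneration.hpp: a forward declaration would suffice here,
//     because only a pointer and a declaration appear ---
struct HolderSketch {
  WidgetSketch* widget() const { return _w; }
  inline int capacity() const;                 // declared, body deferred
  WidgetSketch* _w;
};

// --- role of cardGeneration.inline.hpp: defines the body where the
//     complete WidgetSketch type is available ---
inline int HolderSketch::capacity() const {
  return widget()->capacity();
}

int main() {
  WidgetSketch w;
  HolderSketch h{&w};
  printf("capacity: %d\n", h.capacity());      // prints 42
  return 0;
}
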
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Wed Dec 10 11:32:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -235,34 +235,6 @@
return CardGeneration::expand(bytes, expand_bytes);
}
-
-void TenuredGeneration::shrink(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- size_t size = ReservedSpace::page_align_size_down(bytes);
- if (size > 0) {
- shrink_by(size);
- }
-}
-
-
-size_t TenuredGeneration::capacity() const {
- return _the_space->capacity();
-}
-
-
-size_t TenuredGeneration::used() const {
- return _the_space->used();
-}
-
-
-size_t TenuredGeneration::free() const {
- return _the_space->free();
-}
-
-MemRegion TenuredGeneration::used_region() const {
- return the_space()->used_region();
-}
-
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
return _the_space->free();
}
@@ -271,74 +243,8 @@
return _the_space->free() + _virtual_space.uncommitted_size();
}
-bool TenuredGeneration::grow_by(size_t bytes) {
+void TenuredGeneration::assert_correct_size_change_locking() {
assert_locked_or_safepoint(ExpandHeap_lock);
- bool result = _virtual_space.expand_by(bytes);
- if (result) {
- size_t new_word_size =
- heap_word_size(_virtual_space.committed_size());
- MemRegion mr(_the_space->bottom(), new_word_size);
- // Expand card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
- // Expand shared block offset array
- _bts->resize(new_word_size);
-
- // Fix for bug #4668531
- if (ZapUnusedHeapArea) {
- MemRegion mangle_region(_the_space->end(),
- (HeapWord*)_virtual_space.high());
- SpaceMangler::mangle_region(mangle_region);
- }
-
- // Expand space -- also expands space's BOT
- // (which uses (part of) shared array above)
- _the_space->set_end((HeapWord*)_virtual_space.high());
-
- // update the space and generation capacity counters
- update_counters();
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size - bytes;
- gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
- SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, bytes/K, new_mem_size/K);
- }
- }
- return result;
-}
-
-
-bool TenuredGeneration::grow_to_reserved() {
- assert_locked_or_safepoint(ExpandHeap_lock);
- bool success = true;
- const size_t remaining_bytes = _virtual_space.uncommitted_size();
- if (remaining_bytes > 0) {
- success = grow_by(remaining_bytes);
- DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
- }
- return success;
-}
-
-void TenuredGeneration::shrink_by(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- // Shrink committed space
- _virtual_space.shrink_by(bytes);
- // Shrink space; this also shrinks the space's BOT
- _the_space->set_end((HeapWord*) _virtual_space.high());
- size_t new_word_size = heap_word_size(_the_space->capacity());
- // Shrink the shared block offset array
- _bts->resize(new_word_size);
- MemRegion mr(_the_space->bottom(), new_word_size);
- // Shrink the card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size + bytes;
- gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, new_mem_size/K);
- }
}
// Currently nothing to do.
@@ -348,27 +254,14 @@
_the_space->object_iterate(blk);
}
-void TenuredGeneration::space_iterate(SpaceClosure* blk,
- bool usedOnly) {
- blk->do_space(_the_space);
-}
-
-void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
- blk->set_generation(this);
- younger_refs_in_space_iterate(_the_space, blk);
- blk->reset_generation();
-}
-
void TenuredGeneration::save_marks() {
_the_space->set_saved_mark();
}
-
void TenuredGeneration::reset_saved_marks() {
_the_space->reset_saved_mark();
}
-
bool TenuredGeneration::no_allocs_since_save_marks() {
return _the_space->saved_mark_at_top();
}
@@ -387,26 +280,25 @@
#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
-
void TenuredGeneration::gc_epilogue(bool full) {
// update the generation and space performance counters
update_counters();
if (ZapUnusedHeapArea) {
- the_space()->check_mangled_unused_area_complete();
+ _the_space->check_mangled_unused_area_complete();
}
}
void TenuredGeneration::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
- the_space()->set_top_for_allocations();
+ _the_space->set_top_for_allocations();
}
void TenuredGeneration::verify() {
- the_space()->verify();
+ _the_space->verify();
}
void TenuredGeneration::print_on(outputStream* st) const {
Generation::print_on(st);
st->print(" the");
- the_space()->print_on(st);
+ _the_space->print_on(st);
}
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,25 +42,18 @@
friend class VM_PopulateDumpSharedSpace;
protected:
- ContiguousSpace* _the_space; // actual space holding objects
+ ContiguousSpace* _the_space; // Actual space holding objects
GenerationCounters* _gen_counters;
CSpaceCounters* _space_counters;
- // Grow generation with specified size (returns false if unable to grow)
- virtual bool grow_by(size_t bytes);
- // Grow generation to reserved size.
- virtual bool grow_to_reserved();
- // Shrink generation with specified size (returns false if unable to shrink)
- void shrink_by(size_t bytes);
-
// Allocation failure
virtual bool expand(size_t bytes, size_t expand_bytes);
- void shrink(size_t bytes);
// Accessing spaces
- ContiguousSpace* the_space() const { return _the_space; }
+ ContiguousSpace* space() const { return _the_space; }
+ void assert_correct_size_change_locking();
public:
TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, GenRemSet* remset);
@@ -79,25 +72,11 @@
return !ScavengeBeforeFullGC;
}
- inline bool is_in(const void* p) const;
-
- // Space enquiries
- size_t capacity() const;
- size_t used() const;
- size_t free() const;
-
- MemRegion used_region() const;
-
size_t unsafe_max_alloc_nogc() const;
size_t contiguous_available() const;
// Iteration
void object_iterate(ObjectClosure* blk);
- void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
- void younger_refs_iterate(OopsInGenClosure* blk);
-
- inline CompactibleSpace* first_compaction_space() const;
virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
--- a/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp Wed Dec 10 11:31:43 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp Wed Dec 10 11:32:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,41 +25,32 @@
#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
-#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.hpp"
-bool TenuredGeneration::is_in(const void* p) const {
- return the_space()->is_in(p);
-}
-
-CompactibleSpace*
-TenuredGeneration::first_compaction_space() const {
- return the_space();
-}
-
HeapWord* TenuredGeneration::allocate(size_t word_size,
bool is_tlab) {
assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
- return the_space()->allocate(word_size);
+ return _the_space->allocate(word_size);
}
HeapWord* TenuredGeneration::par_allocate(size_t word_size,
bool is_tlab) {
assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
- return the_space()->par_allocate(word_size);
+ return _the_space->par_allocate(word_size);
}
size_t TenuredGeneration::block_size(const HeapWord* addr) const {
- if (addr < the_space()->top()) return oop(addr)->size();
- else {
- assert(addr == the_space()->top(), "non-block head arg to block_size");
- return the_space()->end() - the_space()->top();
+ if (addr < _the_space->top()) {
+ return oop(addr)->size();
+ } else {
+ assert(addr == _the_space->top(), "non-block head arg to block_size");
+ return _the_space->end() - _the_space->top();
}
}
bool TenuredGeneration::block_is_obj(const HeapWord* addr) const {
- return addr < the_space()->top();
+ return addr < _the_space->top();
}
#endif // SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP