8065993: Merge OneContigSpaceCardGeneration with TenuredGeneration
Reviewed-by: mgerdin, kbarrett
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java Mon Dec 01 14:37:25 2014 +0100
@@ -37,10 +37,7 @@
<ul>
<li> CardGeneration
<ul>
- <li> OneContigSpaceCardGeneration
- <ul>
- <li> TenuredGeneration
- </ul>
+ <li> TenuredGeneration
</ul>
<li> DefNewGeneration
</ul>
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/OneContigSpaceCardGeneration.java Tue Dec 02 09:51:16 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.io.*;
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> OneSpaceOldGeneration models a heap of old objects contained
- in a single contiguous space. </P>
-
- <P> Garbage collection is performed using mark-compact. </P> */
-
-public abstract class OneContigSpaceCardGeneration extends CardGeneration {
- private static AddressField theSpaceField;
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("OneContigSpaceCardGeneration");
-
- theSpaceField = type.getAddressField("_the_space");
- }
-
- public OneContigSpaceCardGeneration(Address addr) {
- super(addr);
- }
-
- public ContiguousSpace theSpace() {
- return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
- }
-
- public boolean isIn(Address p) {
- return theSpace().contains(p);
- }
-
- /** Space queries */
- public long capacity() { return theSpace().capacity(); }
- public long used() { return theSpace().used(); }
- public long free() { return theSpace().free(); }
- public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
-
- public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
- blk.doSpace(theSpace());
- }
-
- public void printOn(PrintStream tty) {
- tty.print(" old ");
- theSpace().printOn(tty);
- }
-}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/TenuredGeneration.java Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/TenuredGeneration.java Mon Dec 01 14:37:25 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,13 +24,62 @@
package sun.jvm.hotspot.memory;
+import java.io.*;
+import java.util.*;
+
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+/** <P> TenuredGeneration models a heap of old objects contained
+ in a single contiguous space. </P>
+
+ <P> Garbage collection is performed using mark-compact. </P> */
+
+public class TenuredGeneration extends CardGeneration {
+ private static AddressField theSpaceField;
-public class TenuredGeneration extends OneContigSpaceCardGeneration {
+ static {
+ VM.registerVMInitializedObserver(new Observer() {
+ public void update(Observable o, Object data) {
+ initialize(VM.getVM().getTypeDataBase());
+ }
+ });
+ }
+
+ private static synchronized void initialize(TypeDataBase db) {
+ Type type = db.lookupType("TenuredGeneration");
+
+ theSpaceField = type.getAddressField("_the_space");
+ }
+
public TenuredGeneration(Address addr) {
super(addr);
}
+ public ContiguousSpace theSpace() {
+ return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
+ }
+
+ public boolean isIn(Address p) {
+ return theSpace().contains(p);
+ }
+
+ /** Space queries */
+ public long capacity() { return theSpace().capacity(); }
+ public long used() { return theSpace().used(); }
+ public long free() { return theSpace().free(); }
+ public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
+
+ public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
+ blk.doSpace(theSpace());
+ }
+
+ public void printOn(PrintStream tty) {
+ tty.print(" old ");
+ theSpace().printOn(tty);
+ }
+
public Generation.Name kind() {
return Generation.Name.MARK_SWEEP_COMPACT;
}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -2812,7 +2812,7 @@
}
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
-// OneContigSpaceCardGeneration, which makes me wonder if we should move this
+// TenuredGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
return CardGeneration::expand(bytes, expand_bytes);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -31,6 +31,7 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
+#include "memory/genCollectedHeap.hpp"
inline void CMSBitMap::clear_all() {
assert_locked();
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -39,7 +39,6 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
-#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -462,19 +462,6 @@
// equal to active_workers. When a different mechanism for shutting
// off parallelism is used, then active_workers can be used in
// place of n_par_threads.
- // This is an example of a path where n_par_threads is
- // set to 0 to turn off parallelism.
- // [7] CardTableModRefBS::non_clean_card_iterate()
- // [8] CardTableRS::younger_refs_in_space_iterate()
- // [9] Generation::younger_refs_in_space_iterate()
- // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
- // [11] CompactingPermGenGen::younger_refs_iterate()
- // [12] CardTableRS::younger_refs_iterate()
- // [13] SharedHeap::process_strong_roots()
- // [14] G1CollectedHeap::verify()
- // [15] Universe::verify()
- // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
- //
int n_threads = SharedHeap::heap()->n_par_threads();
bool is_par = n_threads > 0;
if (is_par) {
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -29,7 +29,6 @@
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
-#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"
class ContiguousSpace;
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -36,7 +36,6 @@
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
-#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -37,7 +37,6 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
-#include "memory/generation.inline.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
--- a/hotspot/src/share/vm/memory/generation.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/generation.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -36,7 +36,6 @@
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
-#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
@@ -603,252 +602,3 @@
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
-
-void OneContigSpaceCardGeneration::collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab) {
- GenCollectedHeap* gch = GenCollectedHeap::heap();
-
- SpecializationStats::clear();
- // Temporarily expand the span of our ref processor, so
- // refs discovery is over the entire heap, not just this generation
- ReferenceProcessorSpanMutator
- x(ref_processor(), gch->reserved_region());
-
- STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
- gc_timer->register_gc_start();
-
- SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
- gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
- GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
-
- gc_timer->register_gc_end();
-
- gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-
- SpecializationStats::print();
-}
-
-HeapWord*
-OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
- bool is_tlab,
- bool parallel) {
- assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
- if (parallel) {
- MutexLocker x(ParGCRareEvent_lock);
- HeapWord* result = NULL;
- size_t byte_size = word_size * HeapWordSize;
- while (true) {
- expand(byte_size, _min_heap_delta_bytes);
- if (GCExpandToAllocateDelayMillis > 0) {
- os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
- }
- result = _the_space->par_allocate(word_size);
- if ( result != NULL) {
- return result;
- } else {
- // If there's not enough expansion space available, give up.
- if (_virtual_space.uncommitted_size() < byte_size) {
- return NULL;
- }
- // else try again
- }
- }
- } else {
- expand(word_size*HeapWordSize, _min_heap_delta_bytes);
- return _the_space->allocate(word_size);
- }
-}
-
-bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
- GCMutexLocker x(ExpandHeap_lock);
- return CardGeneration::expand(bytes, expand_bytes);
-}
-
-
-void OneContigSpaceCardGeneration::shrink(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- size_t size = ReservedSpace::page_align_size_down(bytes);
- if (size > 0) {
- shrink_by(size);
- }
-}
-
-
-size_t OneContigSpaceCardGeneration::capacity() const {
- return _the_space->capacity();
-}
-
-
-size_t OneContigSpaceCardGeneration::used() const {
- return _the_space->used();
-}
-
-
-size_t OneContigSpaceCardGeneration::free() const {
- return _the_space->free();
-}
-
-MemRegion OneContigSpaceCardGeneration::used_region() const {
- return the_space()->used_region();
-}
-
-size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
- return _the_space->free();
-}
-
-size_t OneContigSpaceCardGeneration::contiguous_available() const {
- return _the_space->free() + _virtual_space.uncommitted_size();
-}
-
-bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- bool result = _virtual_space.expand_by(bytes);
- if (result) {
- size_t new_word_size =
- heap_word_size(_virtual_space.committed_size());
- MemRegion mr(_the_space->bottom(), new_word_size);
- // Expand card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
- // Expand shared block offset array
- _bts->resize(new_word_size);
-
- // Fix for bug #4668531
- if (ZapUnusedHeapArea) {
- MemRegion mangle_region(_the_space->end(),
- (HeapWord*)_virtual_space.high());
- SpaceMangler::mangle_region(mangle_region);
- }
-
- // Expand space -- also expands space's BOT
- // (which uses (part of) shared array above)
- _the_space->set_end((HeapWord*)_virtual_space.high());
-
- // update the space and generation capacity counters
- update_counters();
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size - bytes;
- gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
- SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, bytes/K, new_mem_size/K);
- }
- }
- return result;
-}
-
-
-bool OneContigSpaceCardGeneration::grow_to_reserved() {
- assert_locked_or_safepoint(ExpandHeap_lock);
- bool success = true;
- const size_t remaining_bytes = _virtual_space.uncommitted_size();
- if (remaining_bytes > 0) {
- success = grow_by(remaining_bytes);
- DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
- }
- return success;
-}
-
-void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
- assert_locked_or_safepoint(ExpandHeap_lock);
- // Shrink committed space
- _virtual_space.shrink_by(bytes);
- // Shrink space; this also shrinks the space's BOT
- _the_space->set_end((HeapWord*) _virtual_space.high());
- size_t new_word_size = heap_word_size(_the_space->capacity());
- // Shrink the shared block offset array
- _bts->resize(new_word_size);
- MemRegion mr(_the_space->bottom(), new_word_size);
- // Shrink the card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
-
- if (Verbose && PrintGC) {
- size_t new_mem_size = _virtual_space.committed_size();
- size_t old_mem_size = new_mem_size + bytes;
- gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
- name(), old_mem_size/K, new_mem_size/K);
- }
-}
-
-// Currently nothing to do.
-void OneContigSpaceCardGeneration::prepare_for_verify() {}
-
-
-// Override for a card-table generation with one contiguous
-// space. NOTE: For reasons that are lost in the fog of history,
-// this code is used when you iterate over perm gen objects,
-// even when one uses CDS, where the perm gen has a couple of
-// other spaces; this is because CompactingPermGenGen derives
-// from OneContigSpaceCardGeneration. This should be cleaned up,
-// see CR 6897789..
-void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
- _the_space->object_iterate(blk);
-}
-
-void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
- bool usedOnly) {
- blk->do_space(_the_space);
-}
-
-void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
- blk->set_generation(this);
- younger_refs_in_space_iterate(_the_space, blk);
- blk->reset_generation();
-}
-
-void OneContigSpaceCardGeneration::save_marks() {
- _the_space->set_saved_mark();
-}
-
-
-void OneContigSpaceCardGeneration::reset_saved_marks() {
- _the_space->reset_saved_mark();
-}
-
-
-bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
- return _the_space->saved_mark_at_top();
-}
-
-#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
- \
-void OneContigSpaceCardGeneration:: \
-oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
- blk->set_generation(this); \
- _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \
- blk->reset_generation(); \
- save_marks(); \
-}
-
-ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
-
-#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
-
-
-void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
- _last_gc = WaterMark(the_space(), the_space()->top());
-
- // update the generation and space performance counters
- update_counters();
- if (ZapUnusedHeapArea) {
- the_space()->check_mangled_unused_area_complete();
- }
-}
-
-void OneContigSpaceCardGeneration::record_spaces_top() {
- assert(ZapUnusedHeapArea, "Not mangling unused space");
- the_space()->set_top_for_allocations();
-}
-
-void OneContigSpaceCardGeneration::verify() {
- the_space()->verify();
-}
-
-void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
- Generation::print_on(st);
- st->print(" the");
- the_space()->print_on(st);
-}
--- a/hotspot/src/share/vm/memory/generation.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/generation.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -45,9 +45,7 @@
// - ParNewGeneration - a DefNewGeneration that is collected by
// several threads
// - CardGeneration - abstract class adding offset array behavior
-// - OneContigSpaceCardGeneration - abstract class holding a single
-// contiguous space with card marking
-// - TenuredGeneration - tenured (old object) space (markSweepCompact)
+// - TenuredGeneration - tenured (old object) space (markSweepCompact)
// - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
// (Detlefs-Printezis refinement of
// Boehm-Demers-Schenker)
@@ -55,9 +53,7 @@
// The system configurations currently allowed are:
//
// DefNewGeneration + TenuredGeneration
-// DefNewGeneration + ConcurrentMarkSweepGeneration
//
-// ParNewGeneration + TenuredGeneration
// ParNewGeneration + ConcurrentMarkSweepGeneration
//
@@ -641,99 +637,4 @@
virtual bool grow_to_reserved() = 0;
};
-// OneContigSpaceCardGeneration models a heap of old objects contained in a single
-// contiguous space.
-//
-// Garbage collection is performed using mark-compact.
-
-class OneContigSpaceCardGeneration: public CardGeneration {
- friend class VMStructs;
- // Abstractly, this is a subtype that gets access to protected fields.
- friend class VM_PopulateDumpSharedSpace;
-
- protected:
- ContiguousSpace* _the_space; // actual space holding objects
- WaterMark _last_gc; // watermark between objects allocated before
- // and after last GC.
-
- // Grow generation with specified size (returns false if unable to grow)
- virtual bool grow_by(size_t bytes);
- // Grow generation to reserved size.
- virtual bool grow_to_reserved();
- // Shrink generation with specified size (returns false if unable to shrink)
- void shrink_by(size_t bytes);
-
- // Allocation failure
- virtual bool expand(size_t bytes, size_t expand_bytes);
- void shrink(size_t bytes);
-
- // Accessing spaces
- ContiguousSpace* the_space() const { return _the_space; }
-
- public:
- OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
- int level, GenRemSet* remset,
- ContiguousSpace* space) :
- CardGeneration(rs, initial_byte_size, level, remset),
- _the_space(space)
- {}
-
- inline bool is_in(const void* p) const;
-
- // Space enquiries
- size_t capacity() const;
- size_t used() const;
- size_t free() const;
-
- MemRegion used_region() const;
-
- size_t unsafe_max_alloc_nogc() const;
- size_t contiguous_available() const;
-
- // Iteration
- void object_iterate(ObjectClosure* blk);
- void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
- void younger_refs_iterate(OopsInGenClosure* blk);
-
- inline CompactibleSpace* first_compaction_space() const;
-
- virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
- virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
-
- // Accessing marks
- inline WaterMark top_mark();
- inline WaterMark bottom_mark();
-
-#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
- void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
- OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
- SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
-
- void save_marks();
- void reset_saved_marks();
- bool no_allocs_since_save_marks();
-
- inline size_t block_size(const HeapWord* addr) const;
-
- inline bool block_is_obj(const HeapWord* addr) const;
-
- virtual void collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab);
- HeapWord* expand_and_allocate(size_t size,
- bool is_tlab,
- bool parallel = false);
-
- virtual void prepare_for_verify();
-
- virtual void gc_epilogue(bool full);
-
- virtual void record_spaces_top();
-
- virtual void verify();
- virtual void print_on(outputStream* st) const;
-};
-
#endif // SHARE_VM_MEMORY_GENERATION_HPP
--- a/hotspot/src/share/vm/memory/generation.inline.hpp Tue Dec 02 09:51:16 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_HPP
-#define SHARE_VM_MEMORY_GENERATION_INLINE_HPP
-
-#include "memory/genCollectedHeap.hpp"
-#include "memory/generation.hpp"
-#include "memory/space.hpp"
-
-bool OneContigSpaceCardGeneration::is_in(const void* p) const {
- return the_space()->is_in(p);
-}
-
-
-WaterMark OneContigSpaceCardGeneration::top_mark() {
- return the_space()->top_mark();
-}
-
-CompactibleSpace*
-OneContigSpaceCardGeneration::first_compaction_space() const {
- return the_space();
-}
-
-HeapWord* OneContigSpaceCardGeneration::allocate(size_t word_size,
- bool is_tlab) {
- assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
- return the_space()->allocate(word_size);
-}
-
-HeapWord* OneContigSpaceCardGeneration::par_allocate(size_t word_size,
- bool is_tlab) {
- assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
- return the_space()->par_allocate(word_size);
-}
-
-WaterMark OneContigSpaceCardGeneration::bottom_mark() {
- return the_space()->bottom_mark();
-}
-
-size_t OneContigSpaceCardGeneration::block_size(const HeapWord* addr) const {
- if (addr < the_space()->top()) return oop(addr)->size();
- else {
- assert(addr == the_space()->top(), "non-block head arg to block_size");
- return the_space()->_end - the_space()->top();
- }
-}
-
-bool OneContigSpaceCardGeneration::block_is_obj(const HeapWord* addr) const {
- return addr < the_space()->top();
-}
-
-#endif // SHARE_VM_MEMORY_GENERATION_INLINE_HPP
--- a/hotspot/src/share/vm/memory/space.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -495,7 +495,6 @@
// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
- friend class OneContigSpaceCardGeneration;
friend class VMStructs;
// Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
template <typename SpaceType>
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -24,13 +24,15 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
-#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
+#include "memory/genMarkSweep.hpp"
+#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
-#include "memory/tenuredGeneration.hpp"
+#include "memory/tenuredGeneration.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
@@ -38,8 +40,7 @@
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size, int level,
GenRemSet* remset) :
- OneContigSpaceCardGeneration(rs, initial_byte_size,
- level, remset, NULL)
+ CardGeneration(rs, initial_byte_size, level, remset)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
@@ -170,3 +171,244 @@
}
return res;
}
+
+void TenuredGeneration::collect(bool full,
+ bool clear_all_soft_refs,
+ size_t size,
+ bool is_tlab) {
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+ SpecializationStats::clear();
+ // Temporarily expand the span of our ref processor, so
+ // refs discovery is over the entire heap, not just this generation
+ ReferenceProcessorSpanMutator
+ x(ref_processor(), gch->reserved_region());
+
+ STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+ gc_timer->register_gc_start();
+
+ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+ gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
+ GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+
+ gc_timer->register_gc_end();
+
+ gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
+ SpecializationStats::print();
+}
+
+HeapWord*
+TenuredGeneration::expand_and_allocate(size_t word_size,
+ bool is_tlab,
+ bool parallel) {
+ assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+ if (parallel) {
+ MutexLocker x(ParGCRareEvent_lock);
+ HeapWord* result = NULL;
+ size_t byte_size = word_size * HeapWordSize;
+ while (true) {
+ expand(byte_size, _min_heap_delta_bytes);
+ if (GCExpandToAllocateDelayMillis > 0) {
+ os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+ }
+ result = _the_space->par_allocate(word_size);
+ if ( result != NULL) {
+ return result;
+ } else {
+ // If there's not enough expansion space available, give up.
+ if (_virtual_space.uncommitted_size() < byte_size) {
+ return NULL;
+ }
+ // else try again
+ }
+ }
+ } else {
+ expand(word_size*HeapWordSize, _min_heap_delta_bytes);
+ return _the_space->allocate(word_size);
+ }
+}
+
+bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
+ GCMutexLocker x(ExpandHeap_lock);
+ return CardGeneration::expand(bytes, expand_bytes);
+}
+
+
+void TenuredGeneration::shrink(size_t bytes) {
+ assert_locked_or_safepoint(ExpandHeap_lock);
+ size_t size = ReservedSpace::page_align_size_down(bytes);
+ if (size > 0) {
+ shrink_by(size);
+ }
+}
+
+
+size_t TenuredGeneration::capacity() const {
+ return _the_space->capacity();
+}
+
+
+size_t TenuredGeneration::used() const {
+ return _the_space->used();
+}
+
+
+size_t TenuredGeneration::free() const {
+ return _the_space->free();
+}
+
+MemRegion TenuredGeneration::used_region() const {
+ return the_space()->used_region();
+}
+
+size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
+ return _the_space->free();
+}
+
+size_t TenuredGeneration::contiguous_available() const {
+ return _the_space->free() + _virtual_space.uncommitted_size();
+}
+
+bool TenuredGeneration::grow_by(size_t bytes) {
+ assert_locked_or_safepoint(ExpandHeap_lock);
+ bool result = _virtual_space.expand_by(bytes);
+ if (result) {
+ size_t new_word_size =
+ heap_word_size(_virtual_space.committed_size());
+ MemRegion mr(_the_space->bottom(), new_word_size);
+ // Expand card table
+ Universe::heap()->barrier_set()->resize_covered_region(mr);
+ // Expand shared block offset array
+ _bts->resize(new_word_size);
+
+ // Fix for bug #4668531
+ if (ZapUnusedHeapArea) {
+ MemRegion mangle_region(_the_space->end(),
+ (HeapWord*)_virtual_space.high());
+ SpaceMangler::mangle_region(mangle_region);
+ }
+
+ // Expand space -- also expands space's BOT
+ // (which uses (part of) shared array above)
+ _the_space->set_end((HeapWord*)_virtual_space.high());
+
+ // update the space and generation capacity counters
+ update_counters();
+
+ if (Verbose && PrintGC) {
+ size_t new_mem_size = _virtual_space.committed_size();
+ size_t old_mem_size = new_mem_size - bytes;
+ gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
+ SIZE_FORMAT "K to " SIZE_FORMAT "K",
+ name(), old_mem_size/K, bytes/K, new_mem_size/K);
+ }
+ }
+ return result;
+}
+
+
+bool TenuredGeneration::grow_to_reserved() {
+ assert_locked_or_safepoint(ExpandHeap_lock);
+ bool success = true;
+ const size_t remaining_bytes = _virtual_space.uncommitted_size();
+ if (remaining_bytes > 0) {
+ success = grow_by(remaining_bytes);
+ DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+ }
+ return success;
+}
+
+void TenuredGeneration::shrink_by(size_t bytes) {
+ assert_locked_or_safepoint(ExpandHeap_lock);
+ // Shrink committed space
+ _virtual_space.shrink_by(bytes);
+ // Shrink space; this also shrinks the space's BOT
+ _the_space->set_end((HeapWord*) _virtual_space.high());
+ size_t new_word_size = heap_word_size(_the_space->capacity());
+ // Shrink the shared block offset array
+ _bts->resize(new_word_size);
+ MemRegion mr(_the_space->bottom(), new_word_size);
+ // Shrink the card table
+ Universe::heap()->barrier_set()->resize_covered_region(mr);
+
+ if (Verbose && PrintGC) {
+ size_t new_mem_size = _virtual_space.committed_size();
+ size_t old_mem_size = new_mem_size + bytes;
+ gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+ name(), old_mem_size/K, new_mem_size/K);
+ }
+}
+
+// Currently nothing to do.
+void TenuredGeneration::prepare_for_verify() {}
+
+void TenuredGeneration::object_iterate(ObjectClosure* blk) {
+ _the_space->object_iterate(blk);
+}
+
+void TenuredGeneration::space_iterate(SpaceClosure* blk,
+ bool usedOnly) {
+ blk->do_space(_the_space);
+}
+
+void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+ blk->set_generation(this);
+ younger_refs_in_space_iterate(_the_space, blk);
+ blk->reset_generation();
+}
+
+void TenuredGeneration::save_marks() {
+ _the_space->set_saved_mark();
+}
+
+
+void TenuredGeneration::reset_saved_marks() {
+ _the_space->reset_saved_mark();
+}
+
+
+bool TenuredGeneration::no_allocs_since_save_marks() {
+ return _the_space->saved_mark_at_top();
+}
+
+#define TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+void TenuredGeneration:: \
+oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
+ blk->set_generation(this); \
+ _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \
+ blk->reset_generation(); \
+ save_marks(); \
+}
+
+ALL_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN)
+
+#undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
+
+
+void TenuredGeneration::gc_epilogue(bool full) {
+ _last_gc = WaterMark(the_space(), the_space()->top());
+
+ // update the generation and space performance counters
+ update_counters();
+ if (ZapUnusedHeapArea) {
+ the_space()->check_mangled_unused_area_complete();
+ }
+}
+
+void TenuredGeneration::record_spaces_top() {
+ assert(ZapUnusedHeapArea, "Not mangling unused space");
+ the_space()->set_top_for_allocations();
+}
+
+void TenuredGeneration::verify() {
+ the_space()->verify();
+}
+
+void TenuredGeneration::print_on(outputStream* st) const {
+ Generation::print_on(st);
+ st->print(" the");
+ the_space()->print_on(st);
+}
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -31,17 +31,41 @@
#include "memory/generation.hpp"
#include "utilities/macros.hpp"
-// TenuredGeneration models the heap containing old (promoted/tenured) objects.
+// TenuredGeneration models the heap containing old (promoted/tenured) objects
+// contained in a single contiguous space.
+//
+// Garbage collection is performed using mark-compact.
-class TenuredGeneration: public OneContigSpaceCardGeneration {
+class TenuredGeneration: public CardGeneration {
friend class VMStructs;
+ // Abstractly, this is a subtype that gets access to protected fields.
+ friend class VM_PopulateDumpSharedSpace;
+
protected:
+ ContiguousSpace* _the_space; // actual space holding objects
+ WaterMark _last_gc; // watermark between objects allocated before
+ // and after last GC.
+
GenerationCounters* _gen_counters;
CSpaceCounters* _space_counters;
+ // Grow generation with specified size (returns false if unable to grow)
+ virtual bool grow_by(size_t bytes);
+ // Grow generation to reserved size.
+ virtual bool grow_to_reserved();
+ // Shrink generation with specified size (returns false if unable to shrink)
+ void shrink_by(size_t bytes);
+
+ // Allocation failure
+ virtual bool expand(size_t bytes, size_t expand_bytes);
+ void shrink(size_t bytes);
+
+ // Accessing spaces
+ ContiguousSpace* the_space() const { return _the_space; }
+
public:
- TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
- GenRemSet* remset);
+ TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
+ int level, GenRemSet* remset);
Generation::Name kind() { return Generation::MarkSweepCompact; }
@@ -57,7 +81,59 @@
return !ScavengeBeforeFullGC;
}
+ inline bool is_in(const void* p) const;
+
+ // Space enquiries
+ size_t capacity() const;
+ size_t used() const;
+ size_t free() const;
+
+ MemRegion used_region() const;
+
+ size_t unsafe_max_alloc_nogc() const;
+ size_t contiguous_available() const;
+
+ // Iteration
+ void object_iterate(ObjectClosure* blk);
+ void space_iterate(SpaceClosure* blk, bool usedOnly = false);
+
+ void younger_refs_iterate(OopsInGenClosure* blk);
+
+ inline CompactibleSpace* first_compaction_space() const;
+
+ virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
+ virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+
+ // Accessing marks
+ inline WaterMark top_mark();
+ inline WaterMark bottom_mark();
+
+#define TenuredGen_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
+ void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
+ TenuredGen_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
+ SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_DECL)
+
+ void save_marks();
+ void reset_saved_marks();
+ bool no_allocs_since_save_marks();
+
+ inline size_t block_size(const HeapWord* addr) const;
+
+ inline bool block_is_obj(const HeapWord* addr) const;
+
+ virtual void collect(bool full,
+ bool clear_all_soft_refs,
+ size_t size,
+ bool is_tlab);
+ HeapWord* expand_and_allocate(size_t size,
+ bool is_tlab,
+ bool parallel = false);
+
+ virtual void prepare_for_verify();
+
+
virtual void gc_prologue(bool full);
+ virtual void gc_epilogue(bool full);
bool should_collect(bool full,
size_t word_size,
bool is_tlab);
@@ -67,11 +143,16 @@
// Performance Counter support
void update_counters();
+ virtual void record_spaces_top();
+
// Statistics
virtual void update_gc_stats(int level, bool full);
virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
+
+ virtual void verify();
+ virtual void print_on(outputStream* st) const;
};
#endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
+#define SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
+
+#include "memory/genCollectedHeap.hpp"
+#include "memory/space.hpp"
+#include "memory/tenuredGeneration.hpp"
+
+bool TenuredGeneration::is_in(const void* p) const {
+ return the_space()->is_in(p);
+}
+
+
+WaterMark TenuredGeneration::top_mark() {
+ return the_space()->top_mark();
+}
+
+CompactibleSpace*
+TenuredGeneration::first_compaction_space() const {
+ return the_space();
+}
+
+HeapWord* TenuredGeneration::allocate(size_t word_size,
+ bool is_tlab) {
+ assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+ return the_space()->allocate(word_size);
+}
+
+HeapWord* TenuredGeneration::par_allocate(size_t word_size,
+ bool is_tlab) {
+ assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
+ return the_space()->par_allocate(word_size);
+}
+
+WaterMark TenuredGeneration::bottom_mark() {
+ return the_space()->bottom_mark();
+}
+
+size_t TenuredGeneration::block_size(const HeapWord* addr) const {
+ if (addr < the_space()->top()) return oop(addr)->size();
+ else {
+ assert(addr == the_space()->top(), "non-block head arg to block_size");
+ return the_space()->end() - the_space()->top();
+ }
+}
+
+bool TenuredGeneration::block_is_obj(const HeapWord* addr) const {
+ return addr < the_space()->top();
+}
+
+#endif // SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp Mon Dec 01 14:37:25 2014 +0100
@@ -127,7 +127,6 @@
# include "memory/genOopClosures.hpp"
# include "memory/genRemSet.hpp"
# include "memory/generation.hpp"
-# include "memory/generation.inline.hpp"
# include "memory/heap.hpp"
# include "memory/iterator.hpp"
# include "memory/memRegion.hpp"
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Dec 02 09:51:16 2014 +0100
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Dec 01 14:37:25 2014 +0100
@@ -554,9 +554,9 @@
\
nonstatic_field(OffsetTableContigSpace, _offsets, BlockOffsetArray) \
\
- nonstatic_field(OneContigSpaceCardGeneration, _min_heap_delta_bytes, size_t) \
- nonstatic_field(OneContigSpaceCardGeneration, _the_space, ContiguousSpace*) \
- nonstatic_field(OneContigSpaceCardGeneration, _last_gc, WaterMark) \
+ nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
+ nonstatic_field(TenuredGeneration, _the_space, ContiguousSpace*) \
+ nonstatic_field(TenuredGeneration, _last_gc, WaterMark) \
\
\
\
@@ -1481,8 +1481,7 @@
declare_toplevel_type(Generation) \
declare_type(DefNewGeneration, Generation) \
declare_type(CardGeneration, Generation) \
- declare_type(OneContigSpaceCardGeneration, CardGeneration) \
- declare_type(TenuredGeneration, OneContigSpaceCardGeneration) \
+ declare_type(TenuredGeneration, CardGeneration) \
declare_toplevel_type(Space) \
declare_toplevel_type(BitMap) \
declare_type(CompactibleSpace, Space) \
@@ -1534,8 +1533,8 @@
declare_toplevel_type(HeapWord*) \
declare_toplevel_type(MemRegion*) \
declare_toplevel_type(OffsetTableContigSpace*) \
- declare_toplevel_type(OneContigSpaceCardGeneration*) \
declare_toplevel_type(Space*) \
+ declare_toplevel_type(TenuredGeneration*) \
declare_toplevel_type(ThreadLocalAllocBuffer*) \
\
/************************/ \