8202639: Use concrete Generation classes in SerialHeap and CMSHeap
Reviewed-by: eosterlund, ehelin
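The pattern repeated across these files: each raw C-style cast of _young_gen/_old_gen is replaced by a typed accessor on the concrete heap class that asserts the expected Generation::Name once and then performs a static_cast, so call sites get the concrete type without re-checking it. A minimal standalone sketch of the idiom, using simplified stand-ins for the HotSpot types (the real classes live in gc/shared/generation.hpp and gc/serial/defNewGeneration.hpp, and HotSpot's assert macro takes a message argument rather than the <cassert> form used here):

    #include <cassert>
    #include <cstddef>

    // Simplified stand-ins for the HotSpot types, not the VM's own headers.
    class Generation {
    public:
      enum Name { DefNew, ParNew, MarkSweepCompact, ConcurrentMarkSweep };
      virtual Name kind() const = 0;
      virtual ~Generation() {}
    };

    class DefNewGeneration : public Generation {
    public:
      Name kind() const { return DefNew; }
      std::size_t eden_capacity() const { return 1024; } // placeholder payload
    };

    class SerialHeapSketch {
      Generation* _young_gen; // stored as the base type, as in GenCollectedHeap
    public:
      explicit SerialHeapSketch(Generation* young) : _young_gen(young) {}

      // The checked downcast lives in exactly one place; no caller casts again.
      DefNewGeneration* young_gen() const {
        assert(_young_gen->kind() == Generation::DefNew && "Wrong generation type");
        return static_cast<DefNewGeneration*>(_young_gen);
      }
    };

    int main() {
      DefNewGeneration young;
      SerialHeapSketch heap(&young);
      return heap.young_gen()->eden_capacity() == 1024 ? 0 : 1;
    }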
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Mon May 07 14:42:05 2018 +0200
@@ -100,7 +100,7 @@
_young_manager = new GCMemoryManager("ParNew", "end of minor GC");
_old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
- ParNewGeneration* young = (ParNewGeneration*) young_gen();
+ ParNewGeneration* young = young_gen();
_eden_pool = new ContiguousSpacePool(young->eden(),
"Par Eden Space",
young->max_eden_size(),
@@ -128,18 +128,11 @@
}
-void CMSHeap::check_gen_kinds() {
- assert(young_gen()->kind() == Generation::ParNew,
- "Wrong youngest generation type");
- assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
- "Wrong generation kind");
-}
-
CMSHeap* CMSHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
- return (CMSHeap*) heap;
+ return static_cast<CMSHeap*>(heap);
}
void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp Mon May 07 14:42:05 2018 +0200
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_CMS_CMSHEAP_HPP
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
+#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/genCollectedHeap.hpp"
@@ -42,10 +43,6 @@
class WorkGang;
class CMSHeap : public GenCollectedHeap {
-
-protected:
- virtual void check_gen_kinds();
-
public:
CMSHeap(GenCollectorPolicy *policy);
@@ -97,6 +94,16 @@
GCMemoryManager* old_manager() const { return _old_manager; }
+ ParNewGeneration* young_gen() const {
+ assert(_young_gen->kind() == Generation::ParNew, "Wrong generation type");
+ return static_cast<ParNewGeneration*>(_young_gen);
+ }
+
+ ConcurrentMarkSweepGeneration* old_gen() const {
+ assert(_old_gen->kind() == Generation::ConcurrentMarkSweep, "Wrong generation type");
+ return static_cast<ConcurrentMarkSweepGeneration*>(_old_gen);
+ }
+
private:
WorkGang* _workers;
MemoryPool* _eden_pool;
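At the call sites the accessor now carries both the check and the cast, so the serviceability code above shrinks from

    ParNewGeneration* young = (ParNewGeneration*) young_gen();   // unchecked C-style cast

to

    ParNewGeneration* young = young_gen();                       // asserts Generation::ParNew first

One C++ detail worth spelling out: the typed young_gen()/old_gen() here hide, rather than override, GenCollectedHeap's Generation* accessors (they are non-virtual, so this is name hiding, not overriding). Code that reaches the heap through a GenCollectedHeap* still sees the untyped versions; code that goes through CMSHeap::heap() gets the concrete types.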
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Mon May 07 14:42:05 2018 +0200
@@ -602,8 +602,7 @@
// Support for parallelizing young gen rescan
CMSHeap* heap = CMSHeap::heap();
- assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
- _young_gen = (ParNewGeneration*)heap->young_gen();
+ _young_gen = heap->young_gen();
if (heap->supports_inline_contig_alloc()) {
_top_addr = heap->top_addr();
_end_addr = heap->end_addr();
@@ -770,7 +769,6 @@
log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000);
log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
CMSHeap* heap = CMSHeap::heap();
- assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = heap->young_gen()->capacity();
log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000);
log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp Mon May 07 14:42:05 2018 +0200
@@ -126,7 +126,7 @@
static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
if (UseConcMarkSweepGC) {
- ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+ ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
if (value > ergo_max) {
CommandLineError::print(verbose,
@@ -189,7 +189,7 @@
JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
if (UseConcMarkSweepGC) {
- size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity();
+ size_t max_capacity = CMSHeap::heap()->young_gen()->max_capacity();
if (value > max_uintx - max_capacity) {
CommandLineError::print(verbose,
"CMSSamplingGrain (" UINTX_FORMAT ") must be "
@@ -212,7 +212,7 @@
// Skip for current default value.
if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
// CMSBitMapYieldQuantum should be compared with mark bitmap size.
- ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+ ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
if (value > bitmap_size) {
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Mon May 07 14:42:05 2018 +0200
@@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
+#include "gc/serial/serialHeap.hpp"
+#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
@@ -34,7 +36,6 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
@@ -93,20 +94,18 @@
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::FastEvacuateFollowersClosure::
-FastEvacuateFollowersClosure(GenCollectedHeap* gch,
+FastEvacuateFollowersClosure(SerialHeap* heap,
FastScanClosure* cur,
FastScanClosure* older) :
- _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
+ _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older)
{
- assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
- _young_gen = (DefNewGeneration*)_gch->young_gen();
}
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
do {
- _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
- } while (!_gch->no_allocs_since_save_marks());
- guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
+ _heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
+ } while (!_heap->no_allocs_since_save_marks());
+ guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -563,29 +562,29 @@
bool is_tlab) {
assert(full || size > 0, "otherwise we don't want to collect");
- GenCollectedHeap* gch = GenCollectedHeap::heap();
+ SerialHeap* heap = SerialHeap::heap();
_gc_timer->register_gc_start();
DefNewTracer gc_tracer;
- gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+ gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
- _old_gen = gch->old_gen();
+ _old_gen = heap->old_gen();
// If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
log_trace(gc)(":: Collection attempt not safe ::");
- gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
+ heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
return;
}
assert(to()->is_empty(), "Else not collection_attempt_is_safe");
init_assuming_no_promotion_failure();
- GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());
+ GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());
- gch->trace_heap_before_gc(&gc_tracer);
+ heap->trace_heap_before_gc(&gc_tracer);
// These can be shared for all code paths
IsAliveClosure is_alive(this);
@@ -596,23 +595,23 @@
// The preserved marks should be empty at the start of the GC.
_preserved_marks_set.init(1);
- gch->rem_set()->prepare_for_younger_refs_iterate(false);
+ heap->rem_set()->prepare_for_younger_refs_iterate(false);
- assert(gch->no_allocs_since_save_marks(),
+ assert(heap->no_allocs_since_save_marks(),
"save marks have not been newly set.");
FastScanClosure fsc_with_no_gc_barrier(this, false);
FastScanClosure fsc_with_gc_barrier(this, true);
CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
- gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
+ heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
- FastEvacuateFollowersClosure evacuate_followers(gch,
+ FastEvacuateFollowersClosure evacuate_followers(heap,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier);
- assert(gch->no_allocs_since_save_marks(),
+ assert(heap->no_allocs_since_save_marks(),
"save marks have not been newly set.");
{
@@ -621,10 +620,10 @@
// See: CardTableRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);
- gch->young_process_roots(&srs,
- &fsc_with_no_gc_barrier,
- &fsc_with_gc_barrier,
- &cld_scan_closure);
+ heap->young_process_roots(&srs,
+ &fsc_with_no_gc_barrier,
+ &fsc_with_gc_barrier,
+ &cld_scan_closure);
}
// "evacuate followers".
@@ -641,12 +640,12 @@
gc_tracer.report_tenuring_threshold(tenuring_threshold());
pt.print_all_references();
- assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");
+ assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
// Verify that the usage of keep_alive didn't copy any objects.
- assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");
+ assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
if (!_promotion_failed) {
// Swap the survivor spaces.
@@ -670,9 +669,9 @@
// A successful scavenge should restart the GC time limit count which is
// for full GC's.
- AdaptiveSizePolicy* size_policy = gch->size_policy();
+ AdaptiveSizePolicy* size_policy = heap->size_policy();
size_policy->reset_gc_overhead_limit_count();
- assert(!gch->incremental_collection_failed(), "Should be clear");
+ assert(!heap->incremental_collection_failed(), "Should be clear");
} else {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -686,14 +685,14 @@
// and from-space.
swap_spaces(); // For uniformity wrt ParNewGeneration.
from()->set_next_compaction_space(to());
- gch->set_incremental_collection_failed();
+ heap->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_old_gen->promotion_failure_occurred();
gc_tracer.report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
- NOT_PRODUCT(gch->reset_promotion_should_fail();)
+ NOT_PRODUCT(heap->reset_promotion_should_fail();)
}
// We should have processed and cleared all the preserved marks.
_preserved_marks_set.reclaim();
@@ -707,7 +706,7 @@
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
update_time_of_last_gc(now);
- gch->trace_heap_after_gc(&gc_tracer);
+ heap->trace_heap_after_gc(&gc_tracer);
_gc_timer->register_gc_end();
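The closure rework above follows the same idea one level up: FastEvacuateFollowersClosure used to carry a GenCollectedHeap* plus a cached, casted DefNewGeneration*, with its constructor re-asserting the kind. With a typed SerialHeap* it keeps one pointer and reaches the young generation through the checked accessor at the single place it is needed. A compact standalone sketch of that shape (every type here is a hypothetical stand-in, not the VM's):

    #include <cassert>

    class YoungGenSketch {
    public:
      bool promo_failure_scan_is_complete() const { return true; } // placeholder
    };

    class TypedHeapSketch {
      YoungGenSketch _young;
    public:
      YoungGenSketch* young_gen() { return &_young; } // the checked accessor in the VM
      bool no_allocs_since_save_marks() const { return true; }    // placeholder
    };

    class EvacuateFollowersSketch {
      TypedHeapSketch* _heap; // one typed pointer; no cached downcast member
    public:
      explicit EvacuateFollowersSketch(TypedHeapSketch* heap) : _heap(heap) {}
      void do_void() {
        do {
          // ... scan oops recorded since the save marks ...
        } while (!_heap->no_allocs_since_save_marks());
        // Reached through the accessor, not through a cached cast:
        assert(_heap->young_gen()->promo_failure_scan_is_complete());
      }
    };

    int main() {
      TypedHeapSketch heap;
      EvacuateFollowersSketch cl(&heap);
      cl.do_void();
      return 0;
    }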
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp Mon May 07 14:42:05 2018 +0200
@@ -39,6 +39,7 @@
class STWGCTimer;
class CSpaceCounters;
class ScanWeakRefClosure;
+class SerialHeap;
// DefNewGeneration is a young generation containing eden, from- and
// to-space.
@@ -179,12 +180,11 @@
};
class FastEvacuateFollowersClosure: public VoidClosure {
- GenCollectedHeap* _gch;
- DefNewGeneration* _young_gen;
+ SerialHeap* _heap;
FastScanClosure* _scan_cur_or_nonheap;
FastScanClosure* _scan_older;
public:
- FastEvacuateFollowersClosure(GenCollectedHeap* gch,
+ FastEvacuateFollowersClosure(SerialHeap* heap,
FastScanClosure* cur,
FastScanClosure* older);
void do_void();
--- a/src/hotspot/share/gc/serial/serialHeap.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp Mon May 07 14:42:05 2018 +0200
@@ -25,9 +25,17 @@
#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialHeap.hpp"
+#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/genMemoryPools.hpp"
#include "services/memoryManager.hpp"
+SerialHeap* SerialHeap::heap() {
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to SerialHeap::heap()");
+ assert(heap->kind() == CollectedHeap::Serial, "Invalid name");
+ return static_cast<SerialHeap*>(heap);
+}
+
SerialHeap::SerialHeap(GenCollectorPolicy* policy) :
GenCollectedHeap(policy,
Generation::DefNew,
@@ -42,7 +50,7 @@
void SerialHeap::initialize_serviceability() {
- DefNewGeneration* young = (DefNewGeneration*) young_gen();
+ DefNewGeneration* young = young_gen();
// Add a memory pool for each space and young gen doesn't
// support low memory detection as it is expected to get filled up.
@@ -54,7 +62,7 @@
"Survivor Space",
young->max_survivor_size(),
false /* support_usage_threshold */);
- Generation* old = old_gen();
+ TenuredGeneration* old = old_gen();
_old_pool = new GenerationPool(old, "Tenured Gen", true);
_young_manager->add_pool(_eden_pool);
@@ -68,13 +76,6 @@
}
-void SerialHeap::check_gen_kinds() {
- assert(young_gen()->kind() == Generation::DefNew,
- "Wrong youngest generation type");
- assert(old_gen()->kind() == Generation::MarkSweepCompact,
- "Wrong generation kind");
-}
-
GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(2);
memory_managers.append(_young_manager);
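The new SerialHeap::heap() mirrors CMSHeap::heap(): the singleton sits behind Universe::heap() as a plain CollectedHeap*, and each concrete heap provides a static accessor that asserts its CollectedHeap::Name before the static_cast. A standalone sketch of this second checked-downcast layer (Universe, the Name values, and the NULL check are simplified stand-ins):

    #include <cassert>
    #include <cstddef>

    class CollectedHeapSketch {
    public:
      enum Name { Serial, CMS };
      virtual Name kind() const = 0;
      virtual ~CollectedHeapSketch() {}
    };

    class SerialHeapSketch : public CollectedHeapSketch {
    public:
      Name kind() const { return Serial; }

      // Mirrors SerialHeap::heap(): check, then downcast the singleton.
      static SerialHeapSketch* heap();
    };

    // Stand-in for Universe::heap(); the real VM sets this during initialization.
    static CollectedHeapSketch* g_heap = NULL;

    SerialHeapSketch* SerialHeapSketch::heap() {
      assert(g_heap != NULL && "Uninitialized access");
      assert(g_heap->kind() == Serial && "Invalid kind");
      return static_cast<SerialHeapSketch*>(g_heap);
    }

    int main() {
      SerialHeapSketch the_heap;
      g_heap = &the_heap;
      return SerialHeapSketch::heap() == &the_heap ? 0 : 1;
    }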
--- a/src/hotspot/share/gc/serial/serialHeap.hpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp Mon May 07 14:42:05 2018 +0200
@@ -25,12 +25,15 @@
#ifndef SHARE_VM_GC_SERIAL_SERIALHEAP_HPP
#define SHARE_VM_GC_SERIAL_SERIALHEAP_HPP
+#include "gc/serial/defNewGeneration.hpp"
+#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/growableArray.hpp"
class GenCollectorPolicy;
class GCMemoryManager;
class MemoryPool;
+class TenuredGeneration;
class SerialHeap : public GenCollectedHeap {
private:
@@ -40,10 +43,9 @@
virtual void initialize_serviceability();
-protected:
- virtual void check_gen_kinds();
+public:
+ static SerialHeap* heap();
-public:
SerialHeap(GenCollectorPolicy* policy);
virtual Name kind() const {
@@ -61,6 +63,16 @@
virtual bool is_in_closed_subset(const void* p) const {
return is_in(p);
}
+
+ DefNewGeneration* young_gen() const {
+ assert(_young_gen->kind() == Generation::DefNew, "Wrong generation type");
+ return static_cast<DefNewGeneration*>(_young_gen);
+ }
+
+ TenuredGeneration* old_gen() const {
+ assert(_old_gen->kind() == Generation::MarkSweepCompact, "Wrong generation type");
+ return static_cast<TenuredGeneration*>(_old_gen);
+ }
};
#endif // SHARE_VM_GC_SERIAL_SERIALHEAP_HPP
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Mon May 07 14:42:05 2018 +0200
@@ -176,7 +176,7 @@
void GenCollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
- check_gen_kinds();
+
DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
initialize_size_policy(def_new_gen->eden()->capacity(),
@@ -249,7 +249,7 @@
// was a full collection because a partial collection (would
// have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
- size_t young_capacity = young_gen()->capacity_before_gc();
+ size_t young_capacity = _young_gen->capacity_before_gc();
return (word_size > heap_word_size(young_capacity))
|| GCLocker::is_active_and_needs_gc()
|| incremental_collection_failed();
@@ -257,12 +257,12 @@
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
HeapWord* result = NULL;
- if (old_gen()->should_allocate(size, is_tlab)) {
- result = old_gen()->expand_and_allocate(size, is_tlab);
+ if (_old_gen->should_allocate(size, is_tlab)) {
+ result = _old_gen->expand_and_allocate(size, is_tlab);
}
if (result == NULL) {
- if (young_gen()->should_allocate(size, is_tlab)) {
- result = young_gen()->expand_and_allocate(size, is_tlab);
+ if (_young_gen->should_allocate(size, is_tlab)) {
+ result = _young_gen->expand_and_allocate(size, is_tlab);
}
}
assert(result == NULL || is_in_reserved(result), "result not in heap");
@@ -287,7 +287,7 @@
HandleMark hm; // Discard any handles allocated in each iteration.
// First allocation attempt is lock-free.
- Generation *young = young_gen();
+ Generation *young = _young_gen;
assert(young->supports_inline_contig_alloc(),
"Otherwise, must do alloc within heap lock");
if (young->should_allocate(size, is_tlab)) {
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Mon May 07 14:42:04 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Mon May 07 14:42:05 2018 +0200
@@ -63,10 +63,11 @@
OldGen
};
-private:
+protected:
Generation* _young_gen;
Generation* _old_gen;
+private:
GenerationSpec* _young_gen_spec;
GenerationSpec* _old_gen_spec;
@@ -161,8 +162,6 @@
Generation::Name old,
const char* policy_counters_name);
- virtual void check_gen_kinds() = 0;
-
public:
// Returns JNI_OK on success
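The genCollectedHeap hunks are what make the subclass accessors legal: _young_gen and _old_gen move from private to protected so SerialHeap and CMSHeap can read them directly, and the pure-virtual check_gen_kinds() hook disappears because its asserts now live inside the accessors. The access-control point, in two hypothetical classes:

    class Base {
    protected:                 // was private: subclasses may now touch it
      int* _field;
    public:
      Base() : _field(0) {}
    };

    class Derived : public Base {
    public:
      int* field() const { return _field; } // would not compile if _field were private
    };

    int main() {
      Derived d;
      return d.field() == 0 ? 0 : 1;
    }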