--- a/src/hotspot/share/gc/cms/cmsArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/cmsArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "gc/cms/cmsArguments.hpp"
-#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/arguments.hpp"
@@ -36,10 +36,6 @@
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"
-size_t CMSArguments::conservative_max_heap_alignment() {
- return GenCollectedHeap::conservative_max_heap_alignment();
-}
-
void CMSArguments::set_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
"control point invariant");
@@ -154,12 +150,12 @@
// Code along this path potentially sets NewSize and OldSize
log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
- Arguments::min_heap_size(), InitialHeapSize, max_heap);
+ MinHeapSize, InitialHeapSize, max_heap);
size_t min_new = preferred_max_new_size;
if (FLAG_IS_CMDLINE(NewSize)) {
min_new = NewSize;
}
- if (max_heap > min_new && Arguments::min_heap_size() > min_new) {
+ if (max_heap > min_new && MinHeapSize > min_new) {
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
@@ -225,5 +221,5 @@
}
CollectedHeap* CMSArguments::create_heap() {
- return create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
+ return new CMSHeap();
}
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/cmsArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,16 +26,16 @@
#define SHARE_GC_CMS_CMSARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
+#include "gc/shared/genArguments.hpp"
class CollectedHeap;
-class CMSArguments : public GCArguments {
+class CMSArguments : public GenArguments {
private:
void disable_adaptive_size_policy(const char* collector_name);
void set_parnew_gc_flags();
-public:
+
virtual void initialize();
- virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCollectorPolicy.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "gc/shared/gcVMOperations.hpp"
-#include "gc/shared/generationSpec.hpp"
-#include "gc/shared/space.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/vmThread.hpp"
-
-//
-// ConcurrentMarkSweepPolicy methods
-//
-
-void ConcurrentMarkSweepPolicy::initialize_alignments() {
- _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
- _heap_alignment = compute_heap_alignment();
-}
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSCOLLECTORPOLICY_HPP
-#define SHARE_GC_CMS_CMSCOLLECTORPOLICY_HPP
-
-#include "gc/shared/collectorPolicy.hpp"
-
-class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
- protected:
- void initialize_alignments();
-
- public:
- ConcurrentMarkSweepPolicy() {}
-};
-
-#endif // SHARE_GC_CMS_CMSCOLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -65,9 +65,8 @@
}
};
-CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
- GenCollectedHeap(policy,
- Generation::ParNew,
+CMSHeap::CMSHeap() :
+ GenCollectedHeap(Generation::ParNew,
Generation::ConcurrentMarkSweep,
"ParNew:CMS"),
_workers(NULL),
@@ -162,9 +161,7 @@
assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
"Unexpected generation kinds");
CMSCollector* collector =
- new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
- rem_set(),
- (ConcurrentMarkSweepPolicy*) gen_policy());
+ new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());
if (collector == NULL || !collector->completed_initialization()) {
if (collector) {
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -34,7 +34,6 @@
#include "utilities/growableArray.hpp"
class CLDClosure;
-class GenCollectorPolicy;
class GCMemoryManager;
class MemoryPool;
class OopsInGenClosure;
@@ -45,7 +44,7 @@
class CMSHeap : public GenCollectedHeap {
public:
- CMSHeap(GenCollectorPolicy *policy);
+ CMSHeap();
// Returns JNI_OK on success
virtual jint initialize();
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,7 +26,6 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
-#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
@@ -43,7 +42,6 @@
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
@@ -207,7 +205,11 @@
};
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
- ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
+ ReservedSpace rs,
+ size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size,
+ CardTableRS* ct) :
CardGeneration(rs, initial_byte_size, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_did_compact(false)
@@ -258,6 +260,8 @@
// note that all arithmetic is in units of HeapWords.
assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert");
+
+ initialize_performance_counters(min_byte_size, max_byte_size);
}
@@ -314,13 +318,13 @@
return CMSHeap::heap()->size_policy();
}
-void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
+void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
+ size_t max_old_size) {
const char* gen_name = "old";
- GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
// Generation Counters - generation 1, 1 subspace
_gen_counters = new GenerationCounters(gen_name, 1, 1,
- gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
+ min_old_size, max_old_size, &_virtual_space);
_space_counters = new GSpaceCounters(gen_name, 0,
_virtual_space.reserved_size(),
@@ -449,8 +453,7 @@
bool CMSCollector::_foregroundGCShouldWait = false;
CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
- CardTableRS* ct,
- ConcurrentMarkSweepPolicy* cp):
+ CardTableRS* ct):
_overflow_list(NULL),
_conc_workers(NULL), // may be set later
_completed_initialization(false),
@@ -460,7 +463,6 @@
_roots_scanning_options(GenCollectedHeap::SO_None),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_verifying(false),
- _collector_policy(cp),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -585,10 +585,6 @@
bool verifying() const { return _verifying; }
void set_verifying(bool v) { _verifying = v; }
- // Collector policy
- ConcurrentMarkSweepPolicy* _collector_policy;
- ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
-
void set_did_compact(bool v);
// XXX Move these to CMSStats ??? FIX ME !!!
@@ -833,8 +829,7 @@
void setup_cms_unloading_and_verification_state();
public:
CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
- CardTableRS* ct,
- ConcurrentMarkSweepPolicy* cp);
+ CardTableRS* ct);
ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
@@ -1075,7 +1070,11 @@
void assert_correct_size_change_locking();
public:
- ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);
+ ConcurrentMarkSweepGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size,
+ CardTableRS* ct);
// Accessors
CMSCollector* collector() const { return _collector; }
@@ -1212,7 +1211,7 @@
// Performance Counters support
virtual void update_counters();
virtual void update_counters(size_t used);
- void initialize_performance_counters();
+ void initialize_performance_counters(size_t min_old_size, size_t max_old_size);
CollectorCounters* counters() { return collector()->counters(); }
// Support for parallel remark of survivor space
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -622,8 +622,11 @@
_old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}
-ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
- : DefNewGeneration(rs, initial_byte_size, "CMS young collection pauses"),
+ParNewGeneration::ParNewGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size)
+ : DefNewGeneration(rs, initial_byte_size, min_byte_size, max_byte_size, "CMS young collection pauses"),
_plab_stats("Young", YoungPLABSize, PLABWeight),
_overflow_list(NULL),
_is_alive_closure(this)
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -348,7 +348,10 @@
void restore_preserved_marks();
public:
- ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
+ ParNewGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size);
~ParNewGeneration() {
for (uint i = 0; i < ParallelGCThreads; i++)
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,8 +25,7 @@
#include "precompiled.hpp"
#include "gc/epsilon/epsilonArguments.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
-#include "gc/epsilon/epsilonCollectorPolicy.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
@@ -67,6 +66,13 @@
#endif
}
+void EpsilonArguments::initialize_alignments() {
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
+ SpaceAlignment = align;
+ HeapAlignment = align;
+}
+
CollectedHeap* EpsilonArguments::create_heap() {
- return create_heap_with_policy<EpsilonHeap, EpsilonCollectorPolicy>();
+ return new EpsilonHeap();
}
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -30,7 +30,9 @@
class CollectedHeap;
class EpsilonArguments : public GCArguments {
-public:
+private:
+ virtual void initialize_alignments();
+
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,6 @@
#include "runtime/thread.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "utilities/macros.hpp"
--- a/src/hotspot/share/gc/epsilon/epsilonCollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP
-#define SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP
-
-#include "gc/shared/collectorPolicy.hpp"
-
-class EpsilonCollectorPolicy: public CollectorPolicy {
-protected:
- virtual void initialize_alignments() {
- size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
- size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
- _space_alignment = align;
- _heap_alignment = align;
- }
-
-public:
- EpsilonCollectorPolicy() : CollectorPolicy() {};
-};
-
-#endif // SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,14 +25,16 @@
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/globals.hpp"
jint EpsilonHeap::initialize() {
- size_t align = _policy->heap_alignment();
- size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
- size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
+ size_t align = HeapAlignment;
+ size_t init_byte_size = align_up(InitialHeapSize, align);
+ size_t max_byte_size = align_up(MaxHeapSize, align);
// Initialize backing storage
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -28,7 +28,6 @@
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "services/memoryManager.hpp"
-#include "gc/epsilon/epsilonCollectorPolicy.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilon_globals.hpp"
@@ -36,7 +35,6 @@
class EpsilonHeap : public CollectedHeap {
friend class VMStructs;
private:
- EpsilonCollectorPolicy* _policy;
SoftRefPolicy _soft_ref_policy;
EpsilonMonitoringSupport* _monitoring_support;
MemoryPool* _pool;
@@ -53,8 +51,7 @@
public:
static EpsilonHeap* heap();
- EpsilonHeap(EpsilonCollectorPolicy* p) :
- _policy(p),
+ EpsilonHeap() :
_memory_manager("Epsilon Heap", "") {};
virtual Name kind() const {
@@ -65,10 +62,6 @@
return "Epsilon";
}
- virtual CollectorPolicy* collector_policy() const {
- return _policy;
- }
-
virtual SoftRefPolicy* soft_ref_policy() {
return &_soft_ref_policy;
}
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,15 +26,37 @@
#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
-#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
+static const double MaxRamFractionForYoung = 0.8;
+size_t G1Arguments::MaxMemoryForYoung;
+
+static size_t calculate_heap_alignment(size_t space_alignment) {
+ size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ return MAX3(card_table_alignment, space_alignment, page_size);
+}
+
+void G1Arguments::initialize_alignments() {
+ // Set up the region size and associated fields.
+ //
+ // There is a circular dependency here. We base the region size on the heap
+ // size, but the heap size should be aligned with the region size. To get
+ // around this we use the unaligned values for the heap.
+ HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
+ HeapRegionRemSet::setup_remset_size();
+
+ SpaceAlignment = HeapRegion::GrainBytes;
+ HeapAlignment = calculate_heap_alignment(SpaceAlignment);
+}
+
size_t G1Arguments::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
@@ -156,10 +178,81 @@
initialize_verification_types();
}
-CollectedHeap* G1Arguments::create_heap() {
- if (AllocateOldGenAt != NULL) {
- return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
+static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
+ julong phys_mem;
+ // If MaxRam is specified, we use that as maximum physical memory available.
+ if (FLAG_IS_DEFAULT(MaxRAM)) {
+ phys_mem = os::physical_memory();
+ calc_str.append("Physical_Memory");
} else {
- return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+ phys_mem = (julong)MaxRAM;
+ calc_str.append("MaxRAM");
+ }
+
+ julong reasonable_max = phys_mem;
+
+ // If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to calculate
+ // a reasonable max size for the young generation.
+ if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+ reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+ calc_str.append(" / MaxRAMFraction");
+ } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+ reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+ calc_str.append(" * MaxRAMPercentage / 100");
+ } else {
+ // Otherwise, we use our own fraction to calculate the max size of the young generation.
+ reasonable_max = phys_mem * max_ram_fraction_for_young;
+ calc_str.append(" * %0.2f", max_ram_fraction_for_young);
+ }
+
+ return (size_t)reasonable_max;
+}
+
+void G1Arguments::initialize_heap_flags_and_sizes() {
+ if (AllocateOldGenAt != NULL) {
+ initialize_heterogeneous();
}
+
+ GCArguments::initialize_heap_flags_and_sizes();
}
+
+void G1Arguments::initialize_heterogeneous() {
+ FormatBuffer<100> calc_str("");
+
+ MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
+
+ if (MaxNewSize > MaxMemoryForYoung) {
+ if (FLAG_IS_CMDLINE(MaxNewSize)) {
+ log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+ MaxMemoryForYoung, calc_str.buffer());
+ } else {
+ log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+ "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
+ }
+ MaxNewSize = MaxMemoryForYoung;
+ }
+ if (NewSize > MaxMemoryForYoung) {
+ if (FLAG_IS_CMDLINE(NewSize)) {
+ log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+ MaxMemoryForYoung, calc_str.buffer());
+ }
+ NewSize = MaxMemoryForYoung;
+ }
+
+}
+
+CollectedHeap* G1Arguments::create_heap() {
+ return new G1CollectedHeap();
+}
+
+bool G1Arguments::is_heterogeneous_heap() {
+ return AllocateOldGenAt != NULL;
+}
+
+size_t G1Arguments::reasonable_max_memory_for_young() {
+ return MaxMemoryForYoung;
+}
+
+size_t G1Arguments::heap_reserved_size_bytes() {
+ return (is_heterogeneous_heap() ? 2 : 1) * MaxHeapSize;
+}
--- a/src/hotspot/share/gc/g1/g1Arguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -34,13 +34,25 @@
friend class G1HeapVerifierTest;
private:
+ static size_t MaxMemoryForYoung;
+
static void initialize_verification_types();
static void parse_verification_type(const char* type);
-public:
+ virtual void initialize_alignments();
+ virtual void initialize_heap_flags_and_sizes();
+
+ void initialize_heterogeneous();
+
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
+
+public:
+ // Heterogeneous heap support
+ static bool is_heterogeneous_heap();
+ static size_t reasonable_max_memory_for_young();
+ static size_t heap_reserved_size_bytes();
};
#endif // SHARE_GC_G1_G1ARGUMENTS_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -29,10 +29,10 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
@@ -1177,9 +1177,6 @@
const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
- const size_t min_heap_size = collector_policy()->min_heap_byte_size();
- const size_t max_heap_size = collector_policy()->max_heap_byte_size();
-
// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
double used_after_gc_d = (double) used_after_gc;
@@ -1188,7 +1185,7 @@
// Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
- double desired_capacity_upper_bound = (double) max_heap_size;
+ double desired_capacity_upper_bound = (double) MaxHeapSize;
minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
desired_capacity_upper_bound);
maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
@@ -1208,11 +1205,11 @@
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
- minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
+ minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
- maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
+ maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
if (capacity_after_gc < minimum_desired_capacity) {
// Don't expand unless it's significant
@@ -1484,11 +1481,10 @@
const char* get_description() { return "Humongous Regions"; }
};
-G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
+G1CollectedHeap::G1CollectedHeap() :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
_workers(NULL),
- _collector_policy(collector_policy),
_card_table(NULL),
_soft_ref_policy(),
_old_set("Old Region Set", new OldRegionSetChecker()),
@@ -1515,7 +1511,7 @@
_survivor(),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
- _policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
+ _policy(G1Policy::create_policy(_gc_timer_stw)),
_heap_sizing_policy(NULL),
_collection_set(this, _policy),
_hot_card_cache(NULL),
@@ -1644,14 +1640,13 @@
// HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
- size_t init_byte_size = collector_policy()->initial_heap_byte_size();
- size_t max_byte_size = _collector_policy->heap_reserved_size_bytes();
- size_t heap_alignment = collector_policy()->heap_alignment();
+ size_t init_byte_size = InitialHeapSize;
+ size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
- Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
- Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
+ Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
+ Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
// Reserve the maximum.
@@ -1666,8 +1661,8 @@
// If this happens then we could end up using a non-optimal
// compressed oops mode.
- ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
- heap_alignment);
+ ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
+ HeapAlignment);
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
@@ -1699,7 +1694,7 @@
_hot_card_cache = new G1HotCardCache(this);
// Carve out the G1 part of the heap.
- ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
+ ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
size_t page_size = actual_reserved_page_size(heap_rs);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
@@ -1714,8 +1709,8 @@
}
os::trace_page_sizes("Heap",
- collector_policy()->min_heap_byte_size(),
- max_byte_size,
+ MinHeapSize,
+ reserved_byte_size,
page_size,
heap_rs.base(),
heap_rs.size());
@@ -1743,7 +1738,7 @@
G1RegionToSpaceMapper* next_bitmap_storage =
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
- _hrm = HeapRegionManager::create_manager(this, _collector_policy);
+ _hrm = HeapRegionManager::create_manager(this);
_hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_card_table->initialize(cardtable_storage);
@@ -1870,10 +1865,6 @@
SuspendibleThreadSet::desynchronize();
}
-size_t G1CollectedHeap::conservative_max_heap_alignment() {
- return HeapRegion::max_region_size();
-}
-
void G1CollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
@@ -1940,10 +1931,6 @@
true); // allow changes to number of processing threads
}
-CollectorPolicy* G1CollectedHeap::collector_policy() const {
- return _collector_policy;
-}
-
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
@@ -2333,7 +2320,7 @@
}
bool G1CollectedHeap::is_heterogeneous_heap() const {
- return _collector_policy->is_heterogeneous_heap();
+ return G1Arguments::is_heterogeneous_heap();
}
class PrintRegionClosure: public HeapRegionClosure {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -74,7 +74,6 @@
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
-class G1CollectorPolicy;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
@@ -156,7 +155,6 @@
G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
WorkGang* _workers;
- G1CollectorPolicy* _collector_policy;
G1CardTable* _card_table;
SoftRefPolicy _soft_ref_policy;
@@ -926,10 +924,10 @@
// A set of cards where updates happened during the GC
G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
- // Create a G1CollectedHeap with the specified policy.
+ // Create a G1CollectedHeap.
// Must call the initialize method afterwards.
// May not return if something goes wrong.
- G1CollectedHeap(G1CollectorPolicy* policy);
+ G1CollectedHeap();
private:
jint initialize_concurrent_refinement();
@@ -944,9 +942,6 @@
virtual void safepoint_synchronize_begin();
virtual void safepoint_synchronize_end();
- // Return the (conservative) maximum heap alignment for any G1 heap
- static size_t conservative_max_heap_alignment();
-
// Does operations required after initialization has been done.
void post_initialize();
@@ -976,8 +971,6 @@
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
- virtual CollectorPolicy* collector_policy() const;
-
virtual SoftRefPolicy* soft_ref_policy();
virtual void initialize_serviceability();
@@ -1008,6 +1001,7 @@
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
size_t unused_committed_regions_in_bytes() const;
+
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1YoungGenSizer.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/debug.hpp"
-
-G1CollectorPolicy::G1CollectorPolicy() {
-
- // Set up the region size and associated fields. Given that the
- // policy is created before the heap, we have to set this up here,
- // so it's done as soon as possible.
-
- // It would have been natural to pass initial_heap_byte_size() and
- // max_heap_byte_size() to setup_heap_region_size() but those have
- // not been set up at this point since they should be aligned with
- // the region size. So, there is a circular dependency here. We base
- // the region size on the heap size, but the heap size should be
- // aligned with the region size. To get around this we use the
- // unaligned values for the heap.
- HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
- HeapRegionRemSet::setup_remset_size();
-}
-
-void G1CollectorPolicy::initialize_alignments() {
- _space_alignment = HeapRegion::GrainBytes;
- size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
- size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
- _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
-}
-
-size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
- return _max_heap_byte_size;
-}
-
-bool G1CollectorPolicy::is_heterogeneous_heap() const {
- return false;
-}
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_G1COLLECTORPOLICY_HPP
-#define SHARE_GC_G1_G1COLLECTORPOLICY_HPP
-
-#include "gc/shared/collectorPolicy.hpp"
-
-// G1CollectorPolicy is primarily used during initialization and to expose the
-// functionality of the CollectorPolicy interface to the rest of the VM.
-
-class G1YoungGenSizer;
-
-class G1CollectorPolicy: public CollectorPolicy {
-protected:
- void initialize_alignments();
-
-public:
- G1CollectorPolicy();
- virtual size_t heap_reserved_size_bytes() const;
- virtual bool is_heterogeneous_heap() const;
-};
-#endif // SHARE_GC_G1_G1COLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
--- a/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/os.hpp"
-#include "utilities/formatBuffer.hpp"
-
-const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
-size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
-
-static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
- julong phys_mem;
- // If MaxRam is specified, we use that as maximum physical memory available.
- if (FLAG_IS_DEFAULT(MaxRAM)) {
- phys_mem = os::physical_memory();
- calc_str.append("Physical_Memory");
- } else {
- phys_mem = (julong)MaxRAM;
- calc_str.append("MaxRAM");
- }
-
- julong reasonable_max = phys_mem;
-
- // If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
- // reasonable max size of young generation.
- if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
- reasonable_max = (julong)(phys_mem / MaxRAMFraction);
- calc_str.append(" / MaxRAMFraction");
- } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
- reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
- calc_str.append(" * MaxRAMPercentage / 100");
- } else {
- // We use our own fraction to calculate max size of young generation.
- reasonable_max = phys_mem * max_ram_fraction_for_young;
- calc_str.append(" * %0.2f", max_ram_fraction_for_young);
- }
-
- return (size_t)reasonable_max;
-}
-
-void G1HeterogeneousCollectorPolicy::initialize_flags() {
-
- FormatBuffer<100> calc_str("");
-
- MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
-
- if (MaxNewSize > MaxMemoryForYoung) {
- if (FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
- MaxMemoryForYoung, calc_str.buffer());
- } else {
- log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
- "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
- }
- MaxNewSize = MaxMemoryForYoung;
- }
- if (NewSize > MaxMemoryForYoung) {
- if (FLAG_IS_CMDLINE(NewSize)) {
- log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
- MaxMemoryForYoung, calc_str.buffer());
- }
- NewSize = MaxMemoryForYoung;
- }
-
- // After setting new size flags, call base class initialize_flags()
- G1CollectorPolicy::initialize_flags();
-}
-
-size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
- return MaxMemoryForYoung;
-}
-
-size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
- return 2 * _max_heap_byte_size;
-}
-
-bool G1HeterogeneousCollectorPolicy::is_heterogeneous_heap() const {
- return true;
-}
--- a/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
-#define SHARE_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
-
-#include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
-
-class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
-private:
- // Max fraction of dram to use for young generation when MaxRAMFraction and
- // MaxRAMPercentage are not specified on commandline.
- static const double MaxRamFractionForYoung;
- static size_t MaxMemoryForYoung;
-
-protected:
- virtual void initialize_flags();
-
-public:
- G1HeterogeneousCollectorPolicy() {}
- virtual size_t heap_reserved_size_bytes() const;
- virtual bool is_heterogeneous_heap() const;
- static size_t reasonable_max_memory_for_young();
-};
-
-#endif // SHARE_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -28,8 +28,8 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
-G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
- G1Policy(policy, gc_timer), _manager(NULL) {}
+G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(STWGCTimer* gc_timer) :
+ G1Policy(gc_timer), _manager(NULL) {}
// We call the super class init(), after which we provision young_list_target_length() regions in dram.
void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
--- a/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
#define SHARE_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
@@ -34,7 +33,7 @@
HeterogeneousHeapRegionManager* _manager;
public:
- G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
+ G1HeterogeneousHeapPolicy(STWGCTimer* gc_timer);
// initialize policy
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
--- a/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
// will be used later when min and max young size is calculated.
- _max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
+ _max_young_length = (uint)(G1Arguments::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
}
// Since heap is sized potentially to larger value accounting for dram + nvdimm, we need to limit
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
@@ -49,7 +50,7 @@
#include "utilities/growableArray.hpp"
#include "utilities/pair.hpp"
-G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
+G1Policy::G1Policy(STWGCTimer* gc_timer) :
_predictor(G1ConfidencePercent / 100.0),
_analytics(new G1Analytics(&_predictor)),
_remset_tracker(),
@@ -65,7 +66,7 @@
_survivor_surv_rate_group(new SurvRateGroup()),
_reserve_factor((double) G1ReservePercent / 100.0),
_reserve_regions(0),
- _young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
+ _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
_free_regions_at_end_of_collection(0),
_max_rs_lengths(0),
_rs_lengths_prediction(0),
@@ -89,11 +90,11 @@
delete _young_gen_sizer;
}
-G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
- if (policy->is_heterogeneous_heap()) {
- return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
+G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
+ if (G1Arguments::is_heterogeneous_heap()) {
+ return new G1HeterogeneousHeapPolicy(gc_timer_stw);
} else {
- return new G1Policy(policy, gc_timer_stw);
+ return new G1Policy(gc_timer_stw);
}
}
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1POLICY_HPP
#define SHARE_GC_G1_G1POLICY_HPP
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
@@ -284,11 +283,11 @@
void abort_time_to_mixed_tracking();
public:
- G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
+ G1Policy(STWGCTimer* gc_timer);
virtual ~G1Policy();
- static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
+ static G1Policy* create_policy(STWGCTimer* gc_timer_stw);
G1CollectorState* collector_state() const;
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -130,8 +130,8 @@
&_max_desired_young_length);
}
-G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
- if (policy->is_heterogeneous_heap()) {
+G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer() {
+ if (G1Arguments::is_heterogeneous_heap()) {
return new G1HeterogeneousHeapYoungGenSizer();
} else {
return new G1YoungGenSizer();
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1YOUNGGENSIZER_HPP
#define SHARE_GC_G1_G1YOUNGGENSIZER_HPP
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "utilities/globalDefinitions.hpp"
// There are three command line options related to the young gen size:
@@ -108,7 +107,7 @@
return _use_adaptive_sizing;
}
- static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
+ static G1YoungGenSizer* create_gen_sizer();
};
#endif // SHARE_GC_G1_G1YOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -68,9 +68,9 @@
_free_list("Free list", new MasterFreeRegionListChecker())
{ }
-HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
- if (policy->is_heterogeneous_heap()) {
- return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
+HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
+ if (G1Arguments::is_heterogeneous_heap()) {
+ return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_reserved_size_bytes() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
}
return new HeapRegionManager();
}
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,10 +26,8 @@
#define SHARE_GC_G1_HEAPREGIONMANAGER_HPP
#include "gc/g1/g1BiasedArray.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "services/memoryUsage.hpp"
class HeapRegion;
@@ -129,7 +127,7 @@
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionManager();
- static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
+ static HeapRegionManager* create_manager(G1CollectedHeap* heap);
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,8 +26,9 @@
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
-#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
+#include "gc/parallel/parallelArguments.hpp"
+#include "gc/shared/genArguments.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
@@ -38,17 +39,15 @@
// gen with ASPSYoungGen and ASPSOldGen, respectively. Revert to
// the old behavior otherwise (with PSYoungGen and PSOldGen).
-AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
- GenerationSizer* policy,
- size_t alignment) :
- _virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, policy->min_old_size(),
- policy->min_young_size(), alignment)) {
- size_t init_low_byte_size = policy->initial_old_size();
- size_t min_low_byte_size = policy->min_old_size();
- size_t max_low_byte_size = policy->max_old_size();
- size_t init_high_byte_size = policy->initial_young_size();
- size_t min_high_byte_size = policy->min_young_size();
- size_t max_high_byte_size = policy->max_young_size();
+AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs) :
+ _virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, MinOldSize,
+ MinNewSize, GenAlignment)) {
+ size_t init_low_byte_size = OldSize;
+ size_t min_low_byte_size = MinOldSize;
+ size_t max_low_byte_size = MaxOldSize;
+ size_t init_high_byte_size = NewSize;
+ size_t min_high_byte_size = MinNewSize;
+ size_t max_high_byte_size = MaxNewSize;
assert(min_low_byte_size <= init_low_byte_size &&
init_low_byte_size <= max_low_byte_size, "Parameter check");
@@ -95,7 +94,7 @@
// Layout the reserved space for the generations.
// If OldGen is allocated on nv-dimm, we need to split the reservation (this is required for windows).
ReservedSpace old_rs =
- virtual_spaces()->reserved_space().first_part(max_low_byte_size, policy->is_hetero_heap() /* split */);
+ virtual_spaces()->reserved_space().first_part(max_low_byte_size, ParallelArguments::is_heterogeneous_heap() /* split */);
ReservedSpace heap_rs =
virtual_spaces()->reserved_space().last_part(max_low_byte_size);
ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
@@ -111,10 +110,10 @@
"old", 1);
// The virtual spaces are created by the initialization of the gens.
- _young_gen->initialize(young_rs, alignment);
+ _young_gen->initialize(young_rs, GenAlignment);
assert(young_gen()->gen_size_limit() == young_rs.size(),
"Consistency check");
- _old_gen->initialize(old_rs, alignment, "old", 1);
+ _old_gen->initialize(old_rs, GenAlignment, "old", 1);
assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
}
}
@@ -284,12 +283,10 @@
}
}
-AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs,
- GenerationSizer* policy,
- size_t alignment) {
- if (policy->is_hetero_heap() && UseAdaptiveGCBoundary) {
- return new AdjoiningGenerationsForHeteroHeap(old_young_rs, policy, alignment);
+AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs) {
+ if (ParallelArguments::is_heterogeneous_heap() && UseAdaptiveGCBoundary) {
+ return new AdjoiningGenerationsForHeteroHeap(old_young_rs);
} else {
- return new AdjoiningGenerations(old_young_rs, policy, alignment);
+ return new AdjoiningGenerations(old_young_rs);
}
}
--- a/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerations.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -28,7 +28,6 @@
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/asPSOldGen.hpp"
#include "gc/parallel/asPSYoungGen.hpp"
-#include "gc/parallel/generationSizer.hpp"
// Contains two generations that both use an AdjoiningVirtualSpaces.
@@ -59,7 +58,7 @@
AdjoiningVirtualSpaces* _virtual_spaces;
public:
- AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+ AdjoiningGenerations(ReservedSpace rs);
// Accessors
PSYoungGen* young_gen() { return _young_gen; }
@@ -78,7 +77,7 @@
// for the adjoining generations.
virtual size_t reserved_byte_size();
- // Return new AdjoiningGenerations instance based on collector policy (specifically - whether heap is heterogeneous).
- static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+ // Return a new AdjoiningGenerations instance based on arguments (specifically, whether the heap is heterogeneous).
+ static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs);
};
#endif // SHARE_GC_PARALLEL_ADJOININGGENERATIONS_HPP
--- a/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
-#include "gc/parallel/generationSizer.hpp"
+#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "logging/log.hpp"
@@ -37,17 +37,17 @@
// Create two virtual spaces (HeteroVirtualSpaces), low() on nv-dimm memory, high() on dram.
// create ASPSOldGen and ASPSYoungGen the same way as in base class
-AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) :
- _total_size_limit(policy->max_heap_byte_size()) {
- size_t init_old_byte_size = policy->initial_old_size();
- size_t min_old_byte_size = policy->min_old_size();
- size_t max_old_byte_size = policy->max_old_size();
- size_t init_young_byte_size = policy->initial_young_size();
- size_t min_young_byte_size = policy->min_young_size();
- size_t max_young_byte_size = policy->max_young_size();
+AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs) :
+ _total_size_limit(ParallelArguments::heap_reserved_size_bytes()) {
+ size_t init_old_byte_size = OldSize;
+ size_t min_old_byte_size = MinOldSize;
+ size_t max_old_byte_size = MaxOldSize;
+ size_t init_young_byte_size = NewSize;
+ size_t min_young_byte_size = MinNewSize;
+ size_t max_young_byte_size = MaxNewSize;
// create HeteroVirtualSpaces which is composed of non-overlapping virtual spaces.
HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
- min_young_byte_size, _total_size_limit, alignment);
+ min_young_byte_size, _total_size_limit);
assert(min_old_byte_size <= init_old_byte_size &&
init_old_byte_size <= max_old_byte_size, "Parameter check");
@@ -83,11 +83,11 @@
_virtual_spaces = hetero_virtual_spaces;
}
-size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory(GenerationSizer* policy) {
+size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory() {
// This is the size that young gen can grow to, when AdaptiveGCBoundary is true.
- size_t max_yg_size = policy->max_heap_byte_size() - policy->min_old_size();
+ size_t max_yg_size = ParallelArguments::heap_reserved_size_bytes() - MinOldSize;
// This is the size that old gen can grow to, when AdaptiveGCBoundary is true.
- size_t max_old_size = policy->max_heap_byte_size() - policy->min_young_size();
+ size_t max_old_size = ParallelArguments::heap_reserved_size_bytes() - MinNewSize;
return max_yg_size + max_old_size;
}
@@ -98,10 +98,11 @@
return total_size_limit();
}
-AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size, size_t alignment) :
- AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, alignment),
+AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size) :
+ AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, GenAlignment),
_max_total_size(max_total_size),
- _min_old_byte_size(min_old_byte_size), _min_young_byte_size(min_yg_byte_size),
+ _min_old_byte_size(min_old_byte_size),
+ _min_young_byte_size(min_yg_byte_size),
_max_old_byte_size(_max_total_size - _min_young_byte_size),
_max_young_byte_size(_max_total_size - _min_old_byte_size) {
}
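
A standalone sketch (assumed sizes, not HotSpot code) of the reservation math required_reserved_memory() keeps: with non-overlapping young and old virtual spaces, the reservation must cover the largest size each generation can grow to, which is deliberately more than Xmx.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      // Hypothetical values standing in for MaxHeapSize, MinOldSize and MinNewSize.
      size_t max_heap = 1024 * M;
      size_t min_old  = 64 * M;
      size_t min_new  = 32 * M;

      size_t max_young_size = max_heap - min_old;  // how far young can grow
      size_t max_old_size   = max_heap - min_new;  // how far old can grow
      size_t reserved       = max_young_size + max_old_size;

      std::printf("reserve %zu MB for a %zu MB heap\n", reserved / M, max_heap / M);
      return 0;
    }
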
--- a/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/adjoiningGenerationsForHeteroHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -56,8 +56,8 @@
public:
HeteroVirtualSpaces(ReservedSpace rs,
size_t min_old_byte_size,
- size_t min_young_byte_size, size_t max_total_size,
- size_t alignment);
+ size_t min_young_byte_size,
+ size_t max_total_size);
// Increase old generation size and decrease young generation size by same amount
bool adjust_boundary_up(size_t size_in_bytes);
@@ -72,11 +72,11 @@
};
public:
- AdjoiningGenerationsForHeteroHeap(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
+ AdjoiningGenerationsForHeteroHeap(ReservedSpace rs);
- // Given the size policy, calculate the total amount of memory that needs to be reserved.
+ // Calculate the total amount of memory that needs to be reserved.
// We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
- static size_t required_reserved_memory(GenerationSizer* policy);
+ static size_t required_reserved_memory();
// Return the total byte size of the reserved space
size_t reserved_byte_size();
--- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -28,6 +28,7 @@
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/genArguments.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"
@@ -90,9 +91,8 @@
assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t result = gen_size_limit() - virtual_space()->committed_size();
- size_t result_aligned = align_down(result, heap->generation_alignment());
+ size_t result_aligned = align_down(result, GenAlignment);
return result_aligned;
}
@@ -103,11 +103,10 @@
}
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t gen_alignment = heap->generation_alignment();
PSAdaptiveSizePolicy* policy = heap->size_policy();
const size_t working_size =
used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
- const size_t working_aligned = align_up(working_size, gen_alignment);
+ const size_t working_aligned = align_up(working_size, GenAlignment);
const size_t working_or_min = MAX2(working_aligned, min_gen_size());
if (working_or_min > reserved().byte_size()) {
// If the used or minimum gen size (aligned up) is greater
@@ -125,7 +124,7 @@
size_t result = policy->promo_increment_aligned_down(max_contraction);
// Also adjust for inter-generational alignment
- size_t result_aligned = align_down(result, gen_alignment);
+ size_t result_aligned = align_down(result, GenAlignment);
Log(gc, ergo) log;
if (log.is_trace()) {
@@ -138,7 +137,7 @@
log.trace(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, min_gen_size()/K, min_gen_size());
log.trace(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, max_contraction/K, max_contraction);
log.trace(" without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, promo_increment/K, promo_increment);
- log.trace(" alignment " SIZE_FORMAT_HEX, gen_alignment);
+ log.trace(" alignment " SIZE_FORMAT_HEX, GenAlignment);
}
assert(result_aligned <= max_contraction, "arithmetic is wrong");
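
A small standalone sketch of the align_down()/align_up() rounding these hunks now do against the GenAlignment global, assuming power-of-two alignments (which HotSpot asserts elsewhere). GEN_ALIGNMENT and the sizes are hypothetical stand-ins.

    #include <cstddef>
    #include <cstdio>

    static size_t align_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }

    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t K = 1024;
      const size_t GEN_ALIGNMENT = 512 * K;      // assumed value
      size_t uncommitted = 3 * 1024 * K + 100;   // e.g. gen_size_limit() - committed_size()

      std::printf("available for expansion: %zu K\n", align_down(uncommitted, GEN_ALIGNMENT) / K);
      std::printf("working size rounded up: %zu K\n", align_up(uncommitted, GEN_ALIGNMENT) / K);
      return 0;
    }
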
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -29,6 +29,7 @@
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
@@ -74,9 +75,9 @@
size_t current_committed_size = virtual_space()->committed_size();
assert((gen_size_limit() >= current_committed_size),
"generation size limit is wrong");
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+
size_t result = gen_size_limit() - current_committed_size;
- size_t result_aligned = align_down(result, heap->generation_alignment());
+ size_t result_aligned = align_down(result, GenAlignment);
return result_aligned;
}
@@ -93,13 +94,12 @@
if (eden_space()->is_empty()) {
// Respect the minimum size for eden and for the young gen as a whole.
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t eden_alignment = heap->space_alignment();
- const size_t gen_alignment = heap->generation_alignment();
+ const size_t eden_alignment = SpaceAlignment;
assert(eden_space()->capacity_in_bytes() >= eden_alignment,
"Alignment is wrong");
size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
- eden_avail = align_down(eden_avail, gen_alignment);
+ eden_avail = align_down(eden_avail, GenAlignment);
assert(virtual_space()->committed_size() >= min_gen_size(),
"minimum gen size is wrong");
@@ -111,7 +111,7 @@
// for reasons the "increment" fraction is used.
PSAdaptiveSizePolicy* policy = heap->size_policy();
size_t result = policy->eden_increment_aligned_down(max_contraction);
- size_t result_aligned = align_down(result, gen_alignment);
+ size_t result_aligned = align_down(result, GenAlignment);
log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
log_trace(gc, ergo)(" max_contraction " SIZE_FORMAT " K", max_contraction/K);
@@ -128,8 +128,7 @@
// If to_space is below from_space, to_space is not considered.
// to_space can be.
size_t ASPSYoungGen::available_to_live() {
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t alignment = heap->space_alignment();
+ const size_t alignment = SpaceAlignment;
// Include any space that is committed but is not in eden.
size_t available = pointer_delta(eden_space()->bottom(),
@@ -275,7 +274,6 @@
assert(eden_start < from_start, "Cannot push into from_space");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -331,9 +329,9 @@
// Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
if (from_size == 0) {
- from_size = alignment;
+ from_size = SpaceAlignment;
} else {
- from_size = align_up(from_size, alignment);
+ from_size = align_up(from_size, SpaceAlignment);
}
from_end = from_start + from_size;
@@ -380,7 +378,7 @@
// if the space sizes are to be increased by several times then
// 'to_start' will point beyond the young generation. In this case
// 'to_start' should be adjusted.
- to_start = MAX2(to_start, eden_start + alignment);
+ to_start = MAX2(to_start, eden_start + SpaceAlignment);
// Compute how big eden can be, then adjust end.
// See comments above on calculating eden_end.
@@ -395,7 +393,7 @@
assert(eden_end >= eden_start, "addition overflowed");
// Don't let eden shrink down to 0 or less.
- eden_end = MAX2(eden_end, eden_start + alignment);
+ eden_end = MAX2(eden_end, eden_start + SpaceAlignment);
to_start = MAX2(to_start, eden_end);
log_trace(gc, ergo)(" [eden_start .. eden_end): "
--- a/src/hotspot/share/gc/parallel/generationSizer.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/generationSizer.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/align.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-void GenerationSizer::initialize_alignments() {
- _space_alignment = _gen_alignment = default_gen_alignment();
- _heap_alignment = compute_heap_alignment();
-}
-
-void GenerationSizer::initialize_flags() {
- // Do basic sizing work
- GenCollectorPolicy::initialize_flags();
-
- // The survivor ratio's are calculated "raw", unlike the
- // default gc, which adds 2 to the ratio value. We need to
- // make sure the values are valid before using them.
- if (MinSurvivorRatio < 3) {
- FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3);
- }
-
- if (InitialSurvivorRatio < 3) {
- FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
- }
-}
-
-void GenerationSizer::initialize_size_info() {
- const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
- const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
- const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
- const size_t page_sz = MIN2(max_page_sz, min_page_sz);
-
- // Can a page size be something else than a power of two?
- assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
- size_t new_alignment = align_up(page_sz, _gen_alignment);
- if (new_alignment != _gen_alignment) {
- _gen_alignment = new_alignment;
- _space_alignment = new_alignment;
- // Redo everything from the start
- initialize_flags();
- }
- GenCollectorPolicy::initialize_size_info();
-}
-
-bool GenerationSizer::is_hetero_heap() const {
- return false;
-}
-
-size_t GenerationSizer::heap_reserved_size_bytes() const {
- return _max_heap_byte_size;
-}
--- a/src/hotspot/share/gc/parallel/generationSizer.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_GENERATIONSIZER_HPP
-#define SHARE_GC_PARALLEL_GENERATIONSIZER_HPP
-
-#include "gc/shared/collectorPolicy.hpp"
-
-// There is a nice batch of tested generation sizing code in
-// GenCollectorPolicy. Lets reuse it!
-
-class GenerationSizer : public GenCollectorPolicy {
- private:
- // The alignment used for boundary between young gen and old gen
- static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
-
- protected:
-
- void initialize_alignments();
- void initialize_flags();
- void initialize_size_info();
-
- public:
- virtual size_t heap_reserved_size_bytes() const;
- virtual bool is_hetero_heap() const;
-};
-#endif // SHARE_GC_PARALLEL_GENERATIONSIZER_HPP
--- a/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/heterogeneousGenerationSizer.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/formatBuffer.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-const double HeterogeneousGenerationSizer::MaxRamFractionForYoung = 0.8;
-
-// Check the available dram memory to limit NewSize and MaxNewSize before
-// calling base class initialize_flags().
-void HeterogeneousGenerationSizer::initialize_flags() {
- FormatBuffer<100> calc_str("");
-
- julong phys_mem;
- // If MaxRam is specified, we use that as maximum physical memory available.
- if (FLAG_IS_DEFAULT(MaxRAM)) {
- phys_mem = os::physical_memory();
- calc_str.append("Physical_Memory");
- } else {
- phys_mem = (julong)MaxRAM;
- calc_str.append("MaxRAM");
- }
-
- julong reasonable_max = phys_mem;
-
- // If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
- // reasonable max size of young generation.
- if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
- reasonable_max = (julong)(phys_mem / MaxRAMFraction);
- calc_str.append(" / MaxRAMFraction");
- } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
- reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
- calc_str.append(" * MaxRAMPercentage / 100");
- } else {
- // We use our own fraction to calculate max size of young generation.
- reasonable_max = phys_mem * MaxRamFractionForYoung;
- calc_str.append(" * %0.2f", MaxRamFractionForYoung);
- }
- reasonable_max = align_up(reasonable_max, _gen_alignment);
-
- if (MaxNewSize > reasonable_max) {
- if (FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
- (size_t)reasonable_max, calc_str.buffer());
- } else {
- log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
- "Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
- }
- MaxNewSize = reasonable_max;
- }
- if (NewSize > reasonable_max) {
- if (FLAG_IS_CMDLINE(NewSize)) {
- log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
- (size_t)reasonable_max, calc_str.buffer());
- }
- NewSize = reasonable_max;
- }
-
- // After setting new size flags, call base class initialize_flags()
- GenerationSizer::initialize_flags();
-}
-
-bool HeterogeneousGenerationSizer::is_hetero_heap() const {
- return true;
-}
-
-size_t HeterogeneousGenerationSizer::heap_reserved_size_bytes() const {
- if (UseAdaptiveGCBoundary) {
- // This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
- size_t max_yg_size = _max_heap_byte_size - _min_old_size;
- // This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
- size_t max_old_size = _max_heap_byte_size - _min_young_size;
-
- return max_yg_size + max_old_size;
- } else {
- return _max_heap_byte_size;
- }
-}
--- a/src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
-#define SHARE_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
-
-#include "gc/parallel/generationSizer.hpp"
-
-// There is a nice batch of tested generation sizing code in
-// GenCollectorPolicy. Lets reuse it!
-
-class HeterogeneousGenerationSizer : public GenerationSizer {
-private:
- // Max fraction of dram to use for young generation when MaxRAMFraction and
- // MaxRAMPercentage are not specified on commandline.
- static const double MaxRamFractionForYoung;
-
-protected:
- virtual void initialize_flags();
-
-public:
- virtual size_t heap_reserved_size_bytes() const;
- virtual bool is_hetero_heap() const;
-};
-#endif // SHARE_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,20 +24,22 @@
*/
#include "precompiled.hpp"
-#include "gc/parallel/heterogeneousGenerationSizer.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/gcArguments.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
+#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/defaultStream.hpp"
+static const double MaxRamFractionForYoung = 0.8;
+
size_t ParallelArguments::conservative_max_heap_alignment() {
- return CollectorPolicy::compute_heap_alignment();
+ return compute_heap_alignment();
}
void ParallelArguments::initialize() {
@@ -93,10 +95,125 @@
}
}
-CollectedHeap* ParallelArguments::create_heap() {
- if (AllocateOldGenAt != NULL) {
- return create_heap_with_policy<ParallelScavengeHeap, HeterogeneousGenerationSizer>();
- } else {
- return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
+// The alignment used for boundary between young gen and old gen
+static size_t default_gen_alignment() {
+ return 64 * K * HeapWordSize;
+}
+
+void ParallelArguments::initialize_alignments() {
+ SpaceAlignment = GenAlignment = default_gen_alignment();
+ HeapAlignment = compute_heap_alignment();
+}
+
+void ParallelArguments::initialize_heap_flags_and_sizes_one_pass() {
+ // Do basic sizing work
+ GenArguments::initialize_heap_flags_and_sizes();
+
+ // The survivor ratios are calculated "raw", unlike the
+ // default gc, which adds 2 to the ratio value. We need to
+ // make sure the values are valid before using them.
+ if (MinSurvivorRatio < 3) {
+ FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3);
+ }
+
+ if (InitialSurvivorRatio < 3) {
+ FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
+ }
+}
+
+void ParallelArguments::initialize_heap_flags_and_sizes() {
+ if (is_heterogeneous_heap()) {
+ initialize_heterogeneous();
+ }
+
+ initialize_heap_flags_and_sizes_one_pass();
+
+ const size_t max_page_sz = os::page_size_for_region_aligned(MaxHeapSize, 8);
+ const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
+ const size_t min_page_sz = os::page_size_for_region_aligned(MinHeapSize, min_pages);
+ const size_t page_sz = MIN2(max_page_sz, min_page_sz);
+
+ // Can a page size be something other than a power of two?
+ assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
+ size_t new_alignment = align_up(page_sz, GenAlignment);
+ if (new_alignment != GenAlignment) {
+ GenAlignment = new_alignment;
+ SpaceAlignment = new_alignment;
+ // Redo everything from the start
+ initialize_heap_flags_and_sizes_one_pass();
}
}
+
+// Check the available dram memory to limit NewSize and MaxNewSize before
+// calling the base class initialize_heap_flags_and_sizes().
+void ParallelArguments::initialize_heterogeneous() {
+ FormatBuffer<100> calc_str("");
+
+ julong phys_mem;
+ // If MaxRAM is specified, we use that as the maximum physical memory available.
+ if (FLAG_IS_DEFAULT(MaxRAM)) {
+ phys_mem = os::physical_memory();
+ calc_str.append("Physical_Memory");
+ } else {
+ phys_mem = (julong)MaxRAM;
+ calc_str.append("MaxRAM");
+ }
+
+ julong reasonable_max = phys_mem;
+
+ // If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
+ // reasonable max size of young generation.
+ if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+ reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+ calc_str.append(" / MaxRAMFraction");
+ } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+ reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+ calc_str.append(" * MaxRAMPercentage / 100");
+ } else {
+ // We use our own fraction to calculate max size of young generation.
+ reasonable_max = phys_mem * MaxRamFractionForYoung;
+ calc_str.append(" * %0.2f", MaxRamFractionForYoung);
+ }
+ reasonable_max = align_up(reasonable_max, GenAlignment);
+
+ if (MaxNewSize > reasonable_max) {
+ if (FLAG_IS_CMDLINE(MaxNewSize)) {
+ log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+ (size_t)reasonable_max, calc_str.buffer());
+ } else {
+ log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+ "Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
+ }
+ MaxNewSize = reasonable_max;
+ }
+ if (NewSize > reasonable_max) {
+ if (FLAG_IS_CMDLINE(NewSize)) {
+ log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+ (size_t)reasonable_max, calc_str.buffer());
+ }
+ NewSize = reasonable_max;
+ }
+}
+
+bool ParallelArguments::is_heterogeneous_heap() {
+ return AllocateOldGenAt != NULL;
+}
+
+size_t ParallelArguments::heap_reserved_size_bytes() {
+ if (!is_heterogeneous_heap() || !UseAdaptiveGCBoundary) {
+ return MaxHeapSize;
+ }
+
+ // Heterogeneous heap and adaptive size gc boundary
+
+ // This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
+ size_t max_yg_size = MaxHeapSize - MinOldSize;
+ // This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
+ size_t max_old_size = MaxHeapSize - MinNewSize;
+
+ return max_yg_size + max_old_size;
+}
+
+CollectedHeap* ParallelArguments::create_heap() {
+ return new ParallelScavengeHeap();
+}
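
A standalone sketch (hypothetical inputs, not HotSpot code) of the dram-based cap that initialize_heterogeneous() applies: derive a reasonable maximum young-gen size from physical memory (or MaxRAM / MaxRAMFraction / MaxRAMPercentage), align it up to the generation alignment, and clamp NewSize/MaxNewSize to it.

    #include <cstdint>
    #include <cstdio>

    static uint64_t align_up(uint64_t value, uint64_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uint64_t M = 1024 * 1024;
      uint64_t phys_mem       = 8 * 1024 * M;  // assumed dram size
      double   young_fraction = 0.8;           // mirrors MaxRamFractionForYoung
      uint64_t gen_alignment  = 512 * 1024;    // stand-in for GenAlignment

      uint64_t reasonable_max = align_up((uint64_t)(phys_mem * young_fraction), gen_alignment);
      uint64_t max_new_size   = 7 * 1024 * M;  // pretend the user asked for a larger young gen

      if (max_new_size > reasonable_max) {
        max_new_size = reasonable_max;         // the real code also logs a gc+ergo message
      }
      std::printf("MaxNewSize capped to %llu MB\n", (unsigned long long)(max_new_size / M));
      return 0;
    }
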
--- a/src/hotspot/share/gc/parallel/parallelArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,14 +26,26 @@
#define SHARE_GC_PARALLEL_PARALLELARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
+#include "gc/shared/genArguments.hpp"
class CollectedHeap;
-class ParallelArguments : public GCArguments {
-public:
+class ParallelArguments : public GenArguments {
+private:
+ virtual void initialize_alignments();
+ virtual void initialize_heap_flags_and_sizes();
+
+ void initialize_heap_flags_and_sizes_one_pass();
+ void initialize_heterogeneous();
+
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
+
+public:
+ // Heterogeneous heap support
+ static bool is_heterogeneous_heap();
+ static size_t heap_reserved_size_bytes();
};
#endif // SHARE_GC_PARALLEL_PARALLELARGUMENTS_HPP
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -27,8 +27,8 @@
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
+#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
@@ -41,6 +41,7 @@
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
@@ -60,14 +61,14 @@
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
- size_t heap_size = _collector_policy->heap_reserved_size_bytes();
+ const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
- ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
+ ReservedSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
os::trace_page_sizes("Heap",
- _collector_policy->min_heap_byte_size(),
- heap_size,
- generation_alignment(),
+ MinHeapSize,
+ reserved_heap_size,
+ GenAlignment,
heap_rs.base(),
heap_rs.size());
@@ -88,7 +89,7 @@
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
- _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());
+ _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs);
_old_gen = _gens->old_gen();
_young_gen = _gens->young_gen();
@@ -100,13 +101,13 @@
new PSAdaptiveSizePolicy(eden_capacity,
initial_promo_size,
young_gen()->to_space()->capacity_in_bytes(),
- _collector_policy->gen_alignment(),
+ GenAlignment,
max_gc_pause_sec,
max_gc_minor_pause_sec,
GCTimeRatio
);
- assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
+ assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
(old_gen()->virtual_space()->high_boundary() ==
young_gen()->virtual_space()->low_boundary()),
"Boundaries must meet");
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,18 +25,18 @@
#ifndef SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
#define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
-#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
+#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
+#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
@@ -60,8 +60,6 @@
static PSAdaptiveSizePolicy* _size_policy;
static PSGCAdaptivePolicyCounters* _gc_policy_counters;
- GenerationSizer* _collector_policy;
-
SoftRefPolicy _soft_ref_policy;
// Collection of generations that are adjacent in the
@@ -92,9 +90,8 @@
HeapWord* mem_allocate_old_gen(size_t size);
public:
- ParallelScavengeHeap(GenerationSizer* policy) :
+ ParallelScavengeHeap() :
CollectedHeap(),
- _collector_policy(policy),
_gens(NULL),
_death_march_count(0),
_young_manager(NULL),
@@ -117,10 +114,6 @@
return "Parallel";
}
- virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
-
- virtual GenerationSizer* ps_collector_policy() const { return _collector_policy; }
-
virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
virtual GrowableArray<GCMemoryManager*> memory_managers();
@@ -148,15 +141,6 @@
void post_initialize();
void update_counters();
- // The alignment used for the various areas
- size_t space_alignment() { return _collector_policy->space_alignment(); }
- size_t generation_alignment() { return _collector_policy->gen_alignment(); }
-
- // Return the (conservative) maximum heap alignment
- static size_t conservative_max_heap_alignment() {
- return CollectorPolicy::compute_heap_alignment();
- }
-
size_t capacity() const;
size_t used() const;
--- a/src/hotspot/share/gc/parallel/pcTasks.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/pcTasks.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -28,6 +28,7 @@
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psTasks.hpp"
+#include "gc/shared/referenceProcessor.hpp"
// Tasks for parallel compaction of the old generation
--- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -27,7 +27,6 @@
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psScavenge.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcUtil.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -88,7 +88,7 @@
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
-// Note that the all_soft_refs_clear flag in the collector policy
+// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measure
// are being taken to free space.
@@ -135,7 +135,7 @@
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
- // CollectorPolicy::_should_clear_all_soft_refs.
+ // SoftRefPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
PSYoungGen* young_gen = heap->young_gen();
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
+#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
@@ -72,7 +73,7 @@
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
- if(ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
+ if(ParallelArguments::is_heterogeneous_heap()) {
_virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
if (!(static_cast <PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -31,6 +31,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
+#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/pcTasks.hpp"
@@ -1712,7 +1713,7 @@
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
-// Note that the all_soft_refs_clear flag in the collector policy
+// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measure
// are being taken to free space.
@@ -1765,7 +1766,7 @@
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
- // CollectorPolicy::_should_clear_all_soft_refs.
+ // SoftRefPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(maximum_heap_compaction,
heap->soft_ref_policy());
@@ -1995,7 +1996,7 @@
- // We also return false when it's a heterogenous heap because old generation cannot absorb data from eden
+ // We also return false when it's a heterogeneous heap because old generation cannot absorb data from eden
// when it is allocated on different memory (example, nv-dimm) than young.
if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
- ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
+ ParallelArguments::is_heterogeneous_heap()) {
return false;
}
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -34,7 +34,6 @@
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -29,6 +29,7 @@
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
@@ -115,8 +116,7 @@
_max_gen_size, _virtual_space);
// Compute maximum space sizes for performance counters
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- size_t alignment = heap->space_alignment();
+ size_t alignment = SpaceAlignment;
size_t size = virtual_space()->reserved_size();
size_t max_survivor_size;
@@ -165,17 +165,14 @@
}
void PSYoungGen::compute_initial_space_boundaries() {
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
// Compute sizes
- size_t alignment = heap->space_alignment();
size_t size = virtual_space()->committed_size();
- assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
+ assert(size >= 3 * SpaceAlignment, "Young space is not large enough for eden + 2 survivors");
size_t survivor_size = size / InitialSurvivorRatio;
- survivor_size = align_down(survivor_size, alignment);
+ survivor_size = align_down(survivor_size, SpaceAlignment);
// ... but never less than an alignment
- survivor_size = MAX2(survivor_size, alignment);
+ survivor_size = MAX2(survivor_size, SpaceAlignment);
// Young generation is eden + 2 survivor spaces
size_t eden_size = size - (2 * survivor_size);
@@ -219,13 +216,10 @@
#ifndef PRODUCT
void PSYoungGen::space_invariants() {
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t alignment = heap->space_alignment();
-
// Currently, our eden size cannot shrink to zero
- guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
- guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
- guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
+ guarantee(eden_space()->capacity_in_bytes() >= SpaceAlignment, "eden too small");
+ guarantee(from_space()->capacity_in_bytes() >= SpaceAlignment, "from too small");
+ guarantee(to_space()->capacity_in_bytes() >= SpaceAlignment, "to too small");
// Relationship of spaces to each other
char* eden_start = (char*)eden_space()->bottom();
@@ -482,8 +476,6 @@
char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end();
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -537,9 +529,9 @@
// Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
if (from_size == 0) {
- from_size = alignment;
+ from_size = SpaceAlignment;
} else {
- from_size = align_up(from_size, alignment);
+ from_size = align_up(from_size, SpaceAlignment);
}
from_end = from_start + from_size;
@@ -582,7 +574,7 @@
// if the space sizes are to be increased by several times then
// 'to_start' will point beyond the young generation. In this case
// 'to_start' should be adjusted.
- to_start = MAX2(to_start, eden_start + alignment);
+ to_start = MAX2(to_start, eden_start + SpaceAlignment);
// Compute how big eden can be, then adjust end.
// See comments above on calculating eden_end.
@@ -600,7 +592,7 @@
// to_start = MAX2(to_start, eden_end);
// Don't let eden shrink down to 0 or less.
- eden_end = MAX2(eden_end, eden_start + alignment);
+ eden_end = MAX2(eden_end, eden_start + SpaceAlignment);
to_start = MAX2(to_start, eden_end);
log_trace(gc, ergo)(" [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
@@ -800,10 +792,6 @@
// from-space.
size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0;
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- const size_t space_alignment = heap->space_alignment();
- const size_t gen_alignment = heap->generation_alignment();
-
MutableSpace* space_shrinking = NULL;
if (from_space()->end() > to_space()->end()) {
space_shrinking = from_space();
@@ -820,9 +808,9 @@
if (space_shrinking->is_empty()) {
// Don't let the space shrink to 0
- assert(space_shrinking->capacity_in_bytes() >= space_alignment,
+ assert(space_shrinking->capacity_in_bytes() >= SpaceAlignment,
"Space is too small");
- delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
+ delta_in_survivor = space_shrinking->capacity_in_bytes() - SpaceAlignment;
} else {
delta_in_survivor = pointer_delta(space_shrinking->end(),
space_shrinking->top(),
@@ -830,7 +818,7 @@
}
size_t delta_in_bytes = unused_committed + delta_in_survivor;
- delta_in_bytes = align_down(delta_in_bytes, gen_alignment);
+ delta_in_bytes = align_down(delta_in_bytes, GenAlignment);
return delta_in_bytes;
}
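
A standalone sketch (assumed sizes) of the initial eden/survivor split that compute_initial_space_boundaries() performs with the new SpaceAlignment global: survivor = committed / InitialSurvivorRatio, aligned down but never below one alignment unit, and eden takes whatever is left. The values below are hypothetical.

    #include <cstddef>
    #include <cstdio>

    static size_t align_down(size_t value, size_t alignment) {
      return value & ~(alignment - 1);
    }

    int main() {
      const size_t K = 1024;
      size_t space_alignment        = 512 * K;       // stand-in for SpaceAlignment
      size_t initial_survivor_ratio = 8;             // stand-in for InitialSurvivorRatio
      size_t committed              = 24 * 1024 * K; // committed size of the young gen

      size_t survivor = align_down(committed / initial_survivor_ratio, space_alignment);
      if (survivor < space_alignment) {
        survivor = space_alignment;                  // "... but never less than an alignment"
      }
      size_t eden = committed - 2 * survivor;

      std::printf("eden = %zu K, each survivor = %zu K\n", eden / K, survivor / K);
      return 0;
    }
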
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -30,6 +30,7 @@
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
@@ -151,6 +152,8 @@
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,
+ size_t min_size,
+ size_t max_size,
const char* policy)
: Generation(rs, initial_size),
_preserved_marks_set(false /* in_c_heap */),
@@ -174,17 +177,15 @@
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
- uintx alignment = gch->collector_policy()->space_alignment();
uintx size = _virtual_space.reserved_size();
- _max_survivor_size = compute_survivor_size(size, alignment);
+ _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
_max_eden_size = size - (2*_max_survivor_size);
// allocate the performance counters
- GenCollectorPolicy* gcp = gch->gen_policy();
// Generation counters -- generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3,
- gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
+ min_size, max_size, &_virtual_space);
_gc_counters = new CollectorCounters(policy, 0);
_eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
@@ -206,9 +207,6 @@
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
bool clear_space,
bool mangle_space) {
- uintx alignment =
- GenCollectedHeap::heap()->collector_policy()->space_alignment();
-
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
// Otherwise, no care is taken for used areas in the survivor spaces
@@ -218,17 +216,17 @@
// Compute sizes
uintx size = _virtual_space.committed_size();
- uintx survivor_size = compute_survivor_size(size, alignment);
+ uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
uintx eden_size = size - (2*survivor_size);
assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
if (eden_size < minimum_eden_size) {
// May happen due to 64Kb rounding, if so adjust eden size back up
- minimum_eden_size = align_up(minimum_eden_size, alignment);
+ minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
uintx unaligned_survivor_size =
- align_down(maximum_survivor_size, alignment);
- survivor_size = MAX2(unaligned_survivor_size, alignment);
+ align_down(maximum_survivor_size, SpaceAlignment);
+ survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
eden_size = size - (2*survivor_size);
assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
assert(eden_size >= minimum_eden_size, "just checking");
@@ -461,9 +459,8 @@
}
size_t DefNewGeneration::max_capacity() const {
- const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
const size_t reserved_bytes = reserved().byte_size();
- return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
+ return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}
size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -191,7 +191,10 @@
};
public:
- DefNewGeneration(ReservedSpace rs, size_t initial_byte_size,
+ DefNewGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size,
const char* policy="Serial young collection pauses");
virtual void ref_processor_init();
--- a/src/hotspot/share/gc/serial/serialArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/serialArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,16 +23,10 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/serial/serialArguments.hpp"
#include "gc/serial/serialHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-
-size_t SerialArguments::conservative_max_heap_alignment() {
- return GenCollectedHeap::conservative_max_heap_alignment();
-}
CollectedHeap* SerialArguments::create_heap() {
- return create_heap_with_policy<SerialHeap, MarkSweepPolicy>();
+ return new SerialHeap();
}
--- a/src/hotspot/share/gc/serial/serialArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/serialArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -25,13 +25,12 @@
#ifndef SHARE_GC_SERIAL_SERIALARGUMENTS_HPP
#define SHARE_GC_SERIAL_SERIALARGUMENTS_HPP
-#include "gc/shared/gcArguments.hpp"
+#include "gc/shared/genArguments.hpp"
class CollectedHeap;
-class SerialArguments : public GCArguments {
-public:
- virtual size_t conservative_max_heap_alignment();
+class SerialArguments : public GenArguments {
+private:
virtual CollectedHeap* create_heap();
};
--- a/src/hotspot/share/gc/serial/serialHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -36,9 +36,8 @@
return static_cast<SerialHeap*>(heap);
}
-SerialHeap::SerialHeap(GenCollectorPolicy* policy) :
- GenCollectedHeap(policy,
- Generation::DefNew,
+SerialHeap::SerialHeap() :
+ GenCollectedHeap(Generation::DefNew,
Generation::MarkSweepCompact,
"Copy:MSC"),
_eden_pool(NULL),
--- a/src/hotspot/share/gc/serial/serialHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -30,7 +30,6 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/growableArray.hpp"
-class GenCollectorPolicy;
class GCMemoryManager;
class MemoryPool;
class TenuredGeneration;
@@ -46,7 +45,7 @@
public:
static SerialHeap* heap();
- SerialHeap(GenCollectorPolicy* policy);
+ SerialHeap();
virtual Name kind() const {
return CollectedHeap::Serial;
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -45,6 +45,8 @@
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size,
CardTableRS* remset) :
CardGeneration(rs, initial_byte_size, remset)
{
@@ -60,10 +62,9 @@
// initialize performance counters
const char* gen_name = "old";
- GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
// Generation Counters -- generation 1, 1 subspace
_gen_counters = new GenerationCounters(gen_name, 1, 1,
- gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
+ min_byte_size, max_byte_size, &_virtual_space);
_gc_counters = new CollectorCounters("Serial full collection pauses", 1);
--- a/src/hotspot/share/gc/serial/tenuredGeneration.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -58,6 +58,8 @@
public:
TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
+ size_t min_byte_size,
+ size_t max_byte_size,
CardTableRS* remset);
Generation::Name kind() { return Generation::MarkSweepCompact; }
--- a/src/hotspot/share/gc/shared/ageTable.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/ageTable.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -27,7 +27,6 @@
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/ageTableTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -44,7 +44,6 @@
class AdaptiveSizePolicy;
class BarrierSet;
-class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
@@ -388,9 +387,6 @@
void increment_total_full_collections() { _total_full_collections++; }
- // Return the CollectorPolicy for the heap
- virtual CollectorPolicy* collector_policy() const = 0;
-
// Return the SoftRefPolicy for the heap;
virtual SoftRefPolicy* soft_ref_policy() = 0;
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "gc/shared/gcVMOperations.hpp"
-#include "gc/shared/generationSpec.hpp"
-#include "gc/shared/space.hpp"
-#include "logging/log.hpp"
-#include "memory/universe.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/align.hpp"
-#include "utilities/macros.hpp"
-
-// CollectorPolicy methods
-
-CollectorPolicy::CollectorPolicy() :
- _initial_heap_byte_size(InitialHeapSize),
- _max_heap_byte_size(MaxHeapSize),
- _min_heap_byte_size(Arguments::min_heap_size()),
- _space_alignment(0),
- _heap_alignment(0)
-{}
-
-#ifdef ASSERT
-void CollectorPolicy::assert_flags() {
- assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
- assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment");
- assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment");
-}
-
-void CollectorPolicy::assert_size_info() {
- assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage");
- assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage");
- assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes");
- assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes");
- assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes");
- assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment");
- assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment");
- assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment");
-}
-#endif // ASSERT
-
-void CollectorPolicy::initialize_flags() {
- assert(_space_alignment != 0, "Space alignment not set up properly");
- assert(_heap_alignment != 0, "Heap alignment not set up properly");
- assert(_heap_alignment >= _space_alignment,
- "heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
- _heap_alignment, _space_alignment);
- assert(_heap_alignment % _space_alignment == 0,
- "heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
- _heap_alignment, _space_alignment);
-
- if (FLAG_IS_CMDLINE(MaxHeapSize)) {
- if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
- vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
- }
- if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
- vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
- }
- }
-
- // Check heap parameter properties
- if (MaxHeapSize < 2 * M) {
- vm_exit_during_initialization("Too small maximum heap");
- }
- if (InitialHeapSize < M) {
- vm_exit_during_initialization("Too small initial heap");
- }
- if (_min_heap_byte_size < M) {
- vm_exit_during_initialization("Too small minimum heap");
- }
-
- // User inputs from -Xmx and -Xms must be aligned
- _min_heap_byte_size = align_up(_min_heap_byte_size, _heap_alignment);
- size_t aligned_initial_heap_size = align_up(InitialHeapSize, _heap_alignment);
- size_t aligned_max_heap_size = align_up(MaxHeapSize, _heap_alignment);
-
- // Write back to flags if the values changed
- if (aligned_initial_heap_size != InitialHeapSize) {
- FLAG_SET_ERGO(size_t, InitialHeapSize, aligned_initial_heap_size);
- }
- if (aligned_max_heap_size != MaxHeapSize) {
- FLAG_SET_ERGO(size_t, MaxHeapSize, aligned_max_heap_size);
- }
-
- if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
- InitialHeapSize < _min_heap_byte_size) {
- vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
- }
- if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
- FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
- } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
- FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
- if (InitialHeapSize < _min_heap_byte_size) {
- _min_heap_byte_size = InitialHeapSize;
- }
- }
-
- _initial_heap_byte_size = InitialHeapSize;
- _max_heap_byte_size = MaxHeapSize;
-
- FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, _space_alignment));
-
- DEBUG_ONLY(CollectorPolicy::assert_flags();)
-}
-
-void CollectorPolicy::initialize_size_info() {
- log_debug(gc, heap)("Minimum heap " SIZE_FORMAT " Initial heap " SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
- _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
-
- DEBUG_ONLY(CollectorPolicy::assert_size_info();)
-}
-
-size_t CollectorPolicy::compute_heap_alignment() {
- // The card marking array and the offset arrays for old generations are
- // committed in os pages as well. Make sure they are entirely full (to
- // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
- // byte entry and the os page size is 4096, the maximum heap size should
- // be 512*4096 = 2MB aligned.
-
- size_t alignment = CardTableRS::ct_max_alignment_constraint();
-
- if (UseLargePages) {
- // In presence of large pages we have to make sure that our
- // alignment is large page aware.
- alignment = lcm(os::large_page_size(), alignment);
- }
-
- return alignment;
-}
-
-// GenCollectorPolicy methods
-
-GenCollectorPolicy::GenCollectorPolicy() :
- _min_young_size(0),
- _initial_young_size(0),
- _max_young_size(0),
- _min_old_size(0),
- _initial_old_size(0),
- _max_old_size(0),
- _gen_alignment(0)
-{}
-
-size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
- return align_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
-}
-
-size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
- size_t maximum_size) {
- size_t max_minus = maximum_size - _gen_alignment;
- return desired_size < max_minus ? desired_size : max_minus;
-}
-
-size_t GenCollectorPolicy::young_gen_size_lower_bound() {
- // The young generation must be aligned and have room for eden + two survivors
- return align_up(3 * _space_alignment, _gen_alignment);
-}
-
-size_t GenCollectorPolicy::old_gen_size_lower_bound() {
- return align_up(_space_alignment, _gen_alignment);
-}
-
-#ifdef ASSERT
-void GenCollectorPolicy::assert_flags() {
- CollectorPolicy::assert_flags();
- assert(NewSize >= _min_young_size, "Ergonomics decided on a too small young gen size");
- assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
- assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
- assert(NewSize % _gen_alignment == 0, "NewSize alignment");
- assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment");
- assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
- assert(OldSize % _gen_alignment == 0, "OldSize alignment");
-}
-
-void GenCollectorPolicy::assert_size_info() {
- CollectorPolicy::assert_size_info();
- // GenCollectorPolicy::initialize_size_info may update the MaxNewSize
- assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
- assert(NewSize == _initial_young_size, "Discrepancy between NewSize flag and local storage");
- assert(MaxNewSize == _max_young_size, "Discrepancy between MaxNewSize flag and local storage");
- assert(OldSize == _initial_old_size, "Discrepancy between OldSize flag and local storage");
- assert(_min_young_size <= _initial_young_size, "Ergonomics decided on incompatible minimum and initial young gen sizes");
- assert(_initial_young_size <= _max_young_size, "Ergonomics decided on incompatible initial and maximum young gen sizes");
- assert(_min_young_size % _gen_alignment == 0, "_min_young_size alignment");
- assert(_initial_young_size % _gen_alignment == 0, "_initial_young_size alignment");
- assert(_max_young_size % _gen_alignment == 0, "_max_young_size alignment");
- assert(_min_young_size <= bound_minus_alignment(_min_young_size, _min_heap_byte_size),
- "Ergonomics made minimum young generation larger than minimum heap");
- assert(_initial_young_size <= bound_minus_alignment(_initial_young_size, _initial_heap_byte_size),
- "Ergonomics made initial young generation larger than initial heap");
- assert(_max_young_size <= bound_minus_alignment(_max_young_size, _max_heap_byte_size),
- "Ergonomics made maximum young generation lager than maximum heap");
- assert(_min_old_size <= _initial_old_size, "Ergonomics decided on incompatible minimum and initial old gen sizes");
- assert(_initial_old_size <= _max_old_size, "Ergonomics decided on incompatible initial and maximum old gen sizes");
- assert(_max_old_size % _gen_alignment == 0, "_max_old_size alignment");
- assert(_initial_old_size % _gen_alignment == 0, "_initial_old_size alignment");
- assert(_max_heap_byte_size <= (_max_young_size + _max_old_size), "Total maximum heap sizes must be sum of generation maximum sizes");
- assert(_min_young_size + _min_old_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size");
- assert(_initial_young_size + _initial_old_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size");
- assert(_max_young_size + _max_old_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size");
-}
-#endif // ASSERT
-
-void GenCollectorPolicy::initialize_flags() {
- CollectorPolicy::initialize_flags();
-
- assert(_gen_alignment != 0, "Generation alignment not set up properly");
- assert(_heap_alignment >= _gen_alignment,
- "heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
- _heap_alignment, _gen_alignment);
- assert(_gen_alignment % _space_alignment == 0,
- "gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
- _gen_alignment, _space_alignment);
- assert(_heap_alignment % _gen_alignment == 0,
- "heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
- _heap_alignment, _gen_alignment);
-
- // All generational heaps have a young gen; handle those flags here
-
- // Make sure the heap is large enough for two generations
- size_t smallest_new_size = young_gen_size_lower_bound();
- size_t smallest_heap_size = align_up(smallest_new_size + old_gen_size_lower_bound(),
- _heap_alignment);
- if (MaxHeapSize < smallest_heap_size) {
- FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
- _max_heap_byte_size = MaxHeapSize;
- }
- // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
- if (_min_heap_byte_size < smallest_heap_size) {
- _min_heap_byte_size = smallest_heap_size;
- if (InitialHeapSize < _min_heap_byte_size) {
- FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
- _initial_heap_byte_size = smallest_heap_size;
- }
- }
-
- // Make sure NewSize allows an old generation to fit even if set on the command line
- if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
- log_warning(gc, ergo)("NewSize was set larger than initial heap size, will use initial heap size.");
- FLAG_SET_ERGO(size_t, NewSize, bound_minus_alignment(NewSize, _initial_heap_byte_size));
- }
-
- // Now take the actual NewSize into account. We will silently increase NewSize
- // if the user specified a smaller or unaligned value.
- size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
- bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, _gen_alignment));
- if (bounded_new_size != NewSize) {
- FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
- }
- _min_young_size = smallest_new_size;
- _initial_young_size = NewSize;
-
- if (!FLAG_IS_DEFAULT(MaxNewSize)) {
- if (MaxNewSize >= MaxHeapSize) {
- // Make sure there is room for an old generation
- size_t smaller_max_new_size = MaxHeapSize - _gen_alignment;
- if (FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
- "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
- MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
- }
- FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
- if (NewSize > MaxNewSize) {
- FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
- _initial_young_size = NewSize;
- }
- } else if (MaxNewSize < _initial_young_size) {
- FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
- } else if (!is_aligned(MaxNewSize, _gen_alignment)) {
- FLAG_SET_ERGO(size_t, MaxNewSize, align_down(MaxNewSize, _gen_alignment));
- }
- _max_young_size = MaxNewSize;
- }
-
- if (NewSize > MaxNewSize) {
- // At this point this should only happen if the user specifies a large NewSize and/or
- // a small (but not too small) MaxNewSize.
- if (FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
- "A new max generation size of " SIZE_FORMAT "k will be used.",
- NewSize/K, MaxNewSize/K, NewSize/K);
- }
- FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
- _max_young_size = MaxNewSize;
- }
-
- if (SurvivorRatio < 1 || NewRatio < 1) {
- vm_exit_during_initialization("Invalid young gen ratio specified");
- }
-
- if (OldSize < old_gen_size_lower_bound()) {
- FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
- }
- if (!is_aligned(OldSize, _gen_alignment)) {
- FLAG_SET_ERGO(size_t, OldSize, align_down(OldSize, _gen_alignment));
- }
-
- if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
- // NewRatio will be used later to set the young generation size so we use
- // it to calculate how big the heap should be based on the requested OldSize
- // and NewRatio.
- assert(NewRatio > 0, "NewRatio should have been set up earlier");
- size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
-
- calculated_heapsize = align_up(calculated_heapsize, _heap_alignment);
- FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
- _max_heap_byte_size = MaxHeapSize;
- FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
- _initial_heap_byte_size = InitialHeapSize;
- }
-
- // Adjust NewSize and OldSize or MaxHeapSize to match each other
- if (NewSize + OldSize > MaxHeapSize) {
- if (FLAG_IS_CMDLINE(MaxHeapSize)) {
- // Somebody has set a maximum heap size with the intention that we should not
- // exceed it. Adjust New/OldSize as necessary.
- size_t calculated_size = NewSize + OldSize;
- double shrink_factor = (double) MaxHeapSize / calculated_size;
- size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), _gen_alignment);
- FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
- _initial_young_size = NewSize;
-
- // OldSize is already aligned because above we aligned MaxHeapSize to
- // _heap_alignment, and we just made sure that NewSize is aligned to
- // _gen_alignment. In initialize_flags() we verified that _heap_alignment
- // is a multiple of _gen_alignment.
- FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
- } else {
- FLAG_SET_ERGO(size_t, MaxHeapSize, align_up(NewSize + OldSize, _heap_alignment));
- _max_heap_byte_size = MaxHeapSize;
- }
- }
-
- // Update NewSize, if possible, to avoid sizing the young gen too small when only
- // OldSize is set on the command line.
- if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
- if (OldSize < _initial_heap_byte_size) {
- size_t new_size = _initial_heap_byte_size - OldSize;
- // Need to compare against the flag value for max since _max_young_size
- // might not have been set yet.
- if (new_size >= _min_young_size && new_size <= MaxNewSize) {
- FLAG_SET_ERGO(size_t, NewSize, new_size);
- _initial_young_size = NewSize;
- }
- }
- }
-
- always_do_update_barrier = UseConcMarkSweepGC;
-
- DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
-}
-
-// Values set on the command line win over any ergonomically
-// set command line parameters.
-// Ergonomic choice of parameters are done before this
-// method is called. Values for command line parameters such as NewSize
-// and MaxNewSize feed those ergonomic choices into this method.
-// This method makes the final generation sizings consistent with
-// themselves and with overall heap sizings.
-// In the absence of explicitly set command line flags, policies
-// such as the use of NewRatio are used to size the generation.
-
-// Minimum sizes of the generations may be different than
-// the initial sizes. An inconsistency is permitted here
-// in the total size that can be specified explicitly by
-// command line specification of OldSize and NewSize and
-// also a command line specification of -Xms. Issue a warning
-// but allow the values to pass.
-void GenCollectorPolicy::initialize_size_info() {
- CollectorPolicy::initialize_size_info();
-
- _initial_young_size = NewSize;
- _max_young_size = MaxNewSize;
- _initial_old_size = OldSize;
-
- // Determine maximum size of the young generation.
-
- if (FLAG_IS_DEFAULT(MaxNewSize)) {
- _max_young_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
- // Bound the maximum size by NewSize below (since it historically
- // would have been NewSize and because the NewRatio calculation could
- // yield a size that is too small) and bound it by MaxNewSize above.
- // Ergonomics plays here by previously calculating the desired
- // NewSize and MaxNewSize.
- _max_young_size = MIN2(MAX2(_max_young_size, _initial_young_size), MaxNewSize);
- }
-
- // Given the maximum young size, determine the initial and
- // minimum young sizes.
-
- if (_max_heap_byte_size == _initial_heap_byte_size) {
- // The maximum and initial heap sizes are the same so the generation's
- // initial size must be the same as it maximum size. Use NewSize as the
- // size if set on command line.
- _max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : _max_young_size;
- _initial_young_size = _max_young_size;
-
- // Also update the minimum size if min == initial == max.
- if (_max_heap_byte_size == _min_heap_byte_size) {
- _min_young_size = _max_young_size;
- }
- } else {
- if (FLAG_IS_CMDLINE(NewSize)) {
- // If NewSize is set on the command line, we should use it as
- // the initial size, but make sure it is within the heap bounds.
- _initial_young_size =
- MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
- _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size);
- } else {
- // For the case where NewSize is not set on the command line, use
- // NewRatio to size the initial generation size. Use the current
- // NewSize as the floor, because if NewRatio is overly large, the resulting
- // size can be too small.
- _initial_young_size =
- MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
- }
- }
-
- log_trace(gc, heap)("1: Minimum young " SIZE_FORMAT " Initial young " SIZE_FORMAT " Maximum young " SIZE_FORMAT,
- _min_young_size, _initial_young_size, _max_young_size);
-
- // At this point the minimum, initial and maximum sizes
- // of the overall heap and of the young generation have been determined.
- // The maximum old size can be determined from the maximum young
- // and maximum heap size since no explicit flags exist
- // for setting the old generation maximum.
- _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment);
-
- // If no explicit command line flag has been set for the
- // old generation size, use what is left.
- if (!FLAG_IS_CMDLINE(OldSize)) {
- // The user has not specified any value but the ergonomics
- // may have chosen a value (which may or may not be consistent
- // with the overall heap size). In either case make
- // the minimum, maximum and initial sizes consistent
- // with the young sizes and the overall heap sizes.
- _min_old_size = _gen_alignment;
- _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size));
- // _max_old_size has already been made consistent above.
- } else {
- // OldSize has been explicitly set on the command line. Use it
- // for the initial size but make sure the minimum allow a young
- // generation to fit as well.
- // If the user has explicitly set an OldSize that is inconsistent
- // with other command line flags, issue a warning.
- // The generation minimums and the overall heap minimum should
- // be within one generation alignment.
- if (_initial_old_size > _max_old_size) {
- log_warning(gc, ergo)("Inconsistency between maximum heap size and maximum "
- "generation sizes: using maximum heap = " SIZE_FORMAT
- ", -XX:OldSize flag is being ignored",
- _max_heap_byte_size);
- _initial_old_size = _max_old_size;
- }
-
- _min_old_size = MIN2(_initial_old_size, _min_heap_byte_size - _min_young_size);
- }
-
- // The initial generation sizes should match the initial heap size,
- // if not issue a warning and resize the generations. This behavior
- // differs from JDK8 where the generation sizes have higher priority
- // than the initial heap size.
- if ((_initial_old_size + _initial_young_size) != _initial_heap_byte_size) {
- log_warning(gc, ergo)("Inconsistency between generation sizes and heap size, resizing "
- "the generations to fit the heap.");
-
- size_t desired_young_size = _initial_heap_byte_size - _initial_old_size;
- if (_initial_heap_byte_size < _initial_old_size) {
- // Old want all memory, use minimum for young and rest for old
- _initial_young_size = _min_young_size;
- _initial_old_size = _initial_heap_byte_size - _min_young_size;
- } else if (desired_young_size > _max_young_size) {
- // Need to increase both young and old generation
- _initial_young_size = _max_young_size;
- _initial_old_size = _initial_heap_byte_size - _max_young_size;
- } else if (desired_young_size < _min_young_size) {
- // Need to decrease both young and old generation
- _initial_young_size = _min_young_size;
- _initial_old_size = _initial_heap_byte_size - _min_young_size;
- } else {
- // The young generation boundaries allow us to only update the
- // young generation.
- _initial_young_size = desired_young_size;
- }
-
- log_trace(gc, heap)("2: Minimum young " SIZE_FORMAT " Initial young " SIZE_FORMAT " Maximum young " SIZE_FORMAT,
- _min_young_size, _initial_young_size, _max_young_size);
- }
-
- // Write back to flags if necessary.
- if (NewSize != _initial_young_size) {
- FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
- }
-
- if (MaxNewSize != _max_young_size) {
- FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
- }
-
- if (OldSize != _initial_old_size) {
- FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
- }
-
- log_trace(gc, heap)("Minimum old " SIZE_FORMAT " Initial old " SIZE_FORMAT " Maximum old " SIZE_FORMAT,
- _min_old_size, _initial_old_size, _max_old_size);
-
- DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
-}
-
-//
-// MarkSweepPolicy methods
-//
-
-void MarkSweepPolicy::initialize_alignments() {
- _space_alignment = _gen_alignment = (size_t)Generation::GenGrain;
- _heap_alignment = compute_heap_alignment();
-}
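The card-table comment in the hunk above (re-added verbatim in gcArguments.cpp further down) packs the whole alignment rule into one sentence. The following standalone sketch spells out the arithmetic with assumed constants (512 bytes of heap per card-table byte, 4 KiB OS pages, 2 MiB large pages; none of these values come from this patch):

// Standalone illustration of the card-table alignment rule, not part of the patch.
// The real values come from CardTableRS::ct_max_alignment_constraint() and
// os::large_page_size(); the constants below are common defaults used as examples.
#include <cstdint>
#include <iostream>
#include <numeric>   // std::lcm (C++17)

int main() {
  const uint64_t bytes_per_card = 512;             // heap bytes covered by one card-table byte
  const uint64_t os_page        = 4 * 1024;        // assumed small page size
  const uint64_t large_page     = 2 * 1024 * 1024; // assumed large page size

  // A fully committed card-table page must map onto whole pages of heap,
  // so the heap must be aligned to 512 * 4096 = 2 MiB here.
  const uint64_t card_constraint = bytes_per_card * os_page;

  // With -XX:+UseLargePages the alignment must also be a multiple of the large
  // page size, hence the lcm() in compute_heap_alignment().
  const uint64_t alignment = std::lcm(card_constraint, large_page);

  std::cout << card_constraint << " " << alignment << "\n";   // 2097152 2097152
  return 0;
}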
--- a/src/hotspot/share/gc/shared/collectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHARED_COLLECTORPOLICY_HPP
-#define SHARE_GC_SHARED_COLLECTORPOLICY_HPP
-
-#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/generationSpec.hpp"
-#include "memory/allocation.hpp"
-#include "utilities/macros.hpp"
-
-// This class (or more correctly, subtypes of this class)
-// are used to define global garbage collector attributes.
-// This includes initialization of generations and any other
-// shared resources they may need.
-//
-// In general, all flag adjustment and validation should be
-// done in initialize_flags(), which is called prior to
-// initialize_size_info().
-//
-// This class is not fully developed yet. As more collector(s)
-// are added, it is expected that we will come across further
-// behavior that requires global attention. The correct place
-// to deal with those issues is this class.
-
-// Forward declarations.
-class GenCollectorPolicy;
-class AdaptiveSizePolicy;
-class ConcurrentMarkSweepPolicy;
-class G1CollectorPolicy;
-class MarkSweepPolicy;
-
-class CollectorPolicy : public CHeapObj<mtGC> {
- protected:
- virtual void initialize_alignments() = 0;
- virtual void initialize_flags();
- virtual void initialize_size_info();
-
- DEBUG_ONLY(virtual void assert_flags();)
- DEBUG_ONLY(virtual void assert_size_info();)
-
- size_t _initial_heap_byte_size;
- size_t _max_heap_byte_size;
- size_t _min_heap_byte_size;
-
- size_t _space_alignment;
- size_t _heap_alignment;
-
- CollectorPolicy();
-
- public:
- void initialize_all() {
- initialize_alignments();
- initialize_flags();
- initialize_size_info();
- }
-
- // Return maximum heap alignment that may be imposed by the policy.
- static size_t compute_heap_alignment();
-
- size_t space_alignment() { return _space_alignment; }
- size_t heap_alignment() { return _heap_alignment; }
-
- size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
- size_t max_heap_byte_size() { return _max_heap_byte_size; }
- size_t min_heap_byte_size() { return _min_heap_byte_size; }
-};
-
-class GenCollectorPolicy : public CollectorPolicy {
- friend class TestGenCollectorPolicy;
- friend class VMStructs;
-
-protected:
- size_t _min_young_size;
- size_t _initial_young_size;
- size_t _max_young_size;
- size_t _min_old_size;
- size_t _initial_old_size;
- size_t _max_old_size;
-
- // _gen_alignment and _space_alignment will have the same value most of the
- // time. When using large pages they can differ.
- size_t _gen_alignment;
-
- void initialize_flags();
- void initialize_size_info();
-
- DEBUG_ONLY(void assert_flags();)
- DEBUG_ONLY(void assert_size_info();)
-
- // Compute max heap alignment.
- size_t compute_max_alignment();
-
- // Scale the base_size by NewRatio according to
- // result = base_size / (NewRatio + 1)
- // and align by min_alignment()
- size_t scale_by_NewRatio_aligned(size_t base_size);
-
- // Bound the value by the given maximum minus the min_alignment.
- size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
-
- public:
- GenCollectorPolicy();
-
- // Accessors
- size_t min_young_size() { return _min_young_size; }
- size_t initial_young_size() { return _initial_young_size; }
- size_t max_young_size() { return _max_young_size; }
- size_t gen_alignment() { return _gen_alignment; }
- size_t min_old_size() { return _min_old_size; }
- size_t initial_old_size() { return _initial_old_size; }
- size_t max_old_size() { return _max_old_size; }
-
- size_t young_gen_size_lower_bound();
-
- size_t old_gen_size_lower_bound();
-};
-
-class MarkSweepPolicy : public GenCollectorPolicy {
- protected:
- void initialize_alignments();
-
- public:
- MarkSweepPolicy() {}
-};
-
-#endif // SHARE_GC_SHARED_COLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/shared/concurrentGCPhaseManager.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/concurrentGCPhaseManager.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -109,7 +109,7 @@
// - this must be the current top of manager stack
void deactivate();
- // Used to implement CollectorPolicy::request_concurrent_phase().
+ // Used to implement CollectedHeap::request_concurrent_phase().
// Updates request to the new phase, and notifies threads blocked on
// the old request of the change. Returns true if the phase is
// UNCONSTRAINED_PHASE. Otherwise, waits until an active phase is
--- a/src/hotspot/share/gc/shared/gcArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,13 +24,20 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcArguments.hpp"
+#include "logging/log.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
+size_t MinHeapSize = 0;
+
+size_t HeapAlignment = 0;
+size_t SpaceAlignment = 0;
+
void GCArguments::initialize() {
if (FullGCALot && FLAG_IS_DEFAULT(MarkSweepAlwaysCompactCount)) {
MarkSweepAlwaysCompactCount = 1; // Move objects every gc.
@@ -65,6 +72,30 @@
}
}
+void GCArguments::initialize_heap_sizes() {
+ initialize_alignments();
+ initialize_heap_flags_and_sizes();
+ initialize_size_info();
+}
+
+size_t GCArguments::compute_heap_alignment() {
+ // The card marking array and the offset arrays for old generations are
+ // committed in os pages as well. Make sure they are entirely full (to
+ // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
+ // byte entry and the os page size is 4096, the maximum heap size should
+ // be 512*4096 = 2MB aligned.
+
+ size_t alignment = CardTableRS::ct_max_alignment_constraint();
+
+ if (UseLargePages) {
+ // In presence of large pages we have to make sure that our
+ // alignment is large page aware.
+ alignment = lcm(os::large_page_size(), alignment);
+ }
+
+ return alignment;
+}
+
bool GCArguments::check_args_consistency() {
bool status = true;
if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
@@ -79,3 +110,88 @@
}
return status;
}
+
+#ifdef ASSERT
+void GCArguments::assert_flags() {
+ assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
+ assert(InitialHeapSize % HeapAlignment == 0, "InitialHeapSize alignment");
+ assert(MaxHeapSize % HeapAlignment == 0, "MaxHeapSize alignment");
+}
+
+void GCArguments::assert_size_info() {
+ assert(MaxHeapSize >= MinHeapSize, "Ergonomics decided on incompatible minimum and maximum heap sizes");
+ assert(InitialHeapSize >= MinHeapSize, "Ergonomics decided on incompatible initial and minimum heap sizes");
+ assert(MaxHeapSize >= InitialHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
+ assert(MaxHeapSize % HeapAlignment == 0, "MinHeapSize alignment");
+ assert(InitialHeapSize % HeapAlignment == 0, "InitialHeapSize alignment");
+ assert(MaxHeapSize % HeapAlignment == 0, "MaxHeapSize alignment");
+}
+#endif // ASSERT
+
+void GCArguments::initialize_size_info() {
+ log_debug(gc, heap)("Minimum heap " SIZE_FORMAT " Initial heap " SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
+ MinHeapSize, InitialHeapSize, MaxHeapSize);
+
+ DEBUG_ONLY(assert_size_info();)
+}
+
+void GCArguments::initialize_heap_flags_and_sizes() {
+ assert(SpaceAlignment != 0, "Space alignment not set up properly");
+ assert(HeapAlignment != 0, "Heap alignment not set up properly");
+ assert(HeapAlignment >= SpaceAlignment,
+ "HeapAlignment: " SIZE_FORMAT " less than SpaceAlignment: " SIZE_FORMAT,
+ HeapAlignment, SpaceAlignment);
+ assert(HeapAlignment % SpaceAlignment == 0,
+ "HeapAlignment: " SIZE_FORMAT " not aligned by SpaceAlignment: " SIZE_FORMAT,
+ HeapAlignment, SpaceAlignment);
+
+ if (FLAG_IS_CMDLINE(MaxHeapSize)) {
+ if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
+ vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
+ }
+ if (MinHeapSize != 0 && MaxHeapSize < MinHeapSize) {
+ vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
+ }
+ }
+
+ // Check heap parameter properties
+ if (MaxHeapSize < 2 * M) {
+ vm_exit_during_initialization("Too small maximum heap");
+ }
+ if (InitialHeapSize < M) {
+ vm_exit_during_initialization("Too small initial heap");
+ }
+ if (MinHeapSize < M) {
+ vm_exit_during_initialization("Too small minimum heap");
+ }
+
+ // User inputs from -Xmx and -Xms must be aligned
+ MinHeapSize = align_up(MinHeapSize, HeapAlignment);
+ size_t aligned_initial_heap_size = align_up(InitialHeapSize, HeapAlignment);
+ size_t aligned_max_heap_size = align_up(MaxHeapSize, HeapAlignment);
+
+ // Write back to flags if the values changed
+ if (aligned_initial_heap_size != InitialHeapSize) {
+ FLAG_SET_ERGO(size_t, InitialHeapSize, aligned_initial_heap_size);
+ }
+ if (aligned_max_heap_size != MaxHeapSize) {
+ FLAG_SET_ERGO(size_t, MaxHeapSize, aligned_max_heap_size);
+ }
+
+ if (FLAG_IS_CMDLINE(InitialHeapSize) && MinHeapSize != 0 &&
+ InitialHeapSize < MinHeapSize) {
+ vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
+ }
+ if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
+ FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
+ } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
+ FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
+ if (InitialHeapSize < MinHeapSize) {
+ MinHeapSize = InitialHeapSize;
+ }
+ }
+
+ FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, SpaceAlignment));
+
+ DEBUG_ONLY(assert_flags();)
+}
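To make the -Xms/-Xmx handling in initialize_heap_flags_and_sizes() concrete: user-supplied sizes that are not multiples of HeapAlignment are aligned up and written back through FLAG_SET_ERGO. A minimal sketch of that rounding, assuming HeapAlignment is 2 MiB (the value the card-table constraint typically yields; not taken from this patch):

// Sketch of the -Xms/-Xmx rounding, not part of the patch. Assumes a 2 MiB
// HeapAlignment; the real value is set by the collector's initialize_alignments().
#include <cassert>
#include <cstdint>
#include <iostream>

static uint64_t align_up(uint64_t size, uint64_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment expected");
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uint64_t MiB = 1024 * 1024;
  const uint64_t heap_alignment = 2 * MiB;

  // -Xms9m -Xmx101m: neither is 2 MiB aligned, so MinHeapSize/InitialHeapSize and
  // MaxHeapSize are rounded up before being written back ergonomically.
  std::cout << align_up(9 * MiB, heap_alignment) / MiB << " MiB\n";    // 10 MiB
  std::cout << align_up(101 * MiB, heap_alignment) / MiB << " MiB\n";  // 102 MiB
  return 0;
}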
--- a/src/hotspot/share/gc/shared/gcArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -30,15 +30,30 @@
class CollectedHeap;
+extern size_t MinHeapSize;
+
+extern size_t HeapAlignment;
+extern size_t SpaceAlignment;
+
class GCArguments {
protected:
- template <class Heap, class Policy>
- CollectedHeap* create_heap_with_policy();
+ // Initialize HeapAlignment, SpaceAlignment, and extra alignments (e.g. GenAlignment)
+ virtual void initialize_alignments() = 0;
+ virtual void initialize_heap_flags_and_sizes();
+ virtual void initialize_size_info();
+
+ DEBUG_ONLY(void assert_flags();)
+ DEBUG_ONLY(void assert_size_info();)
public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment() = 0;
virtual CollectedHeap* create_heap() = 0;
+
+ void initialize_heap_sizes();
+
+ static size_t compute_heap_alignment();
+
static bool check_args_consistency();
};
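With CollectorPolicy out of the picture, a collector plugs into this class by publishing its alignments through the new globals and overriding the heap-creation hook. The sketch below is hypothetical (MyArguments, MyHeap and the 512 KiB granule are illustrative, not from this patch) and only shows the shape of such a subclass:

// Hypothetical GCArguments subclass; MyArguments, MyHeap and the 512 KiB granule
// are illustrative only and do not appear in this patch.
#include "gc/shared/gcArguments.hpp"

class MyArguments : public GCArguments {
protected:
  virtual void initialize_alignments() {
    // Publish the values that initialize_heap_flags_and_sizes() validates.
    SpaceAlignment = 512 * 1024;                // assumed space granule
    HeapAlignment  = compute_heap_alignment();  // card table / large page constraint
  }
public:
  virtual size_t conservative_max_heap_alignment() {
    return 512 * 1024;                          // assumed; typically mirrors the granule
  }
  virtual CollectedHeap* create_heap();         // typically just: return new MyHeap();
};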
--- a/src/hotspot/share/gc/shared/gcArguments.inline.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/shared/gcArguments.hpp"
-
-class CollectedHeap;
-
-template <class Heap, class Policy>
-CollectedHeap* GCArguments::create_heap_with_policy() {
- Policy* policy = new Policy();
- policy->initialize_all();
- return new Heap(policy);
-}
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcConfiguration.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
@@ -131,7 +132,7 @@
}
size_t GCHeapConfiguration::min_size() const {
- return Arguments::min_heap_size();
+ return MinHeapSize;
}
size_t GCHeapConfiguration::initial_size() const {
--- a/src/hotspot/share/gc/shared/gcOverheadChecker.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcOverheadChecker.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -68,7 +68,7 @@
// GC time limit may or may not have been reached. We
// are approaching that condition and so as not to
// throw an out-of-memory before all SoftRef's have been
- // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
+ // cleared, set _should_clear_all_soft_refs in SoftRefPolicy.
// The clearing will be done on the next GC.
bool near_limit = gc_overhead_limit_near();
if (near_limit) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/genArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/genArguments.hpp"
+#include "gc/shared/generation.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/java.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+size_t MinNewSize = 0;
+
+size_t MinOldSize = 0;
+size_t MaxOldSize = 0;
+
+size_t GenAlignment = 0;
+
+size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; }
+
+static size_t young_gen_size_lower_bound() {
+ // The young generation must be aligned and have room for eden + two survivors
+ return align_up(3 * SpaceAlignment, GenAlignment);
+}
+
+static size_t old_gen_size_lower_bound() {
+ return align_up(SpaceAlignment, GenAlignment);
+}
+
+size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) {
+ return align_down_bounded(base_size / (NewRatio + 1), alignment);
+}
+
+static size_t bound_minus_alignment(size_t desired_size,
+ size_t maximum_size,
+ size_t alignment) {
+ size_t max_minus = maximum_size - alignment;
+ return desired_size < max_minus ? desired_size : max_minus;
+}
+
+void GenArguments::initialize_alignments() {
+ SpaceAlignment = GenAlignment = (size_t)Generation::GenGrain;
+ HeapAlignment = compute_heap_alignment();
+}
+
+void GenArguments::initialize_heap_flags_and_sizes() {
+ GCArguments::initialize_heap_flags_and_sizes();
+
+ assert(GenAlignment != 0, "Generation alignment not set up properly");
+ assert(HeapAlignment >= GenAlignment,
+ "HeapAlignment: " SIZE_FORMAT " less than GenAlignment: " SIZE_FORMAT,
+ HeapAlignment, GenAlignment);
+ assert(GenAlignment % SpaceAlignment == 0,
+ "GenAlignment: " SIZE_FORMAT " not aligned by SpaceAlignment: " SIZE_FORMAT,
+ GenAlignment, SpaceAlignment);
+ assert(HeapAlignment % GenAlignment == 0,
+ "HeapAlignment: " SIZE_FORMAT " not aligned by GenAlignment: " SIZE_FORMAT,
+ HeapAlignment, GenAlignment);
+
+ // All generational heaps have a young gen; handle those flags here
+
+ // Make sure the heap is large enough for two generations
+ size_t smallest_new_size = young_gen_size_lower_bound();
+ size_t smallest_heap_size = align_up(smallest_new_size + old_gen_size_lower_bound(),
+ HeapAlignment);
+ if (MaxHeapSize < smallest_heap_size) {
+ FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
+ }
+ // If needed, synchronize MinHeapSize and InitialHeapSize
+ if (MinHeapSize < smallest_heap_size) {
+ MinHeapSize = smallest_heap_size;
+ if (InitialHeapSize < MinHeapSize) {
+ FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
+ }
+ }
+
+ // Make sure NewSize allows an old generation to fit even if set on the command line
+ if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
+ log_warning(gc, ergo)("NewSize was set larger than initial heap size, will use initial heap size.");
+ FLAG_SET_ERGO(size_t, NewSize, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
+ }
+
+ // Now take the actual NewSize into account. We will silently increase NewSize
+ // if the user specified a smaller or unaligned value.
+ size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment);
+ bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment));
+ if (bounded_new_size != NewSize) {
+ FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
+ }
+ MinNewSize = smallest_new_size;
+
+ if (!FLAG_IS_DEFAULT(MaxNewSize)) {
+ if (MaxNewSize >= MaxHeapSize) {
+ // Make sure there is room for an old generation
+ size_t smaller_max_new_size = MaxHeapSize - GenAlignment;
+ if (FLAG_IS_CMDLINE(MaxNewSize)) {
+ log_warning(gc, ergo)("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
+ "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
+ MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
+ }
+ FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
+ if (NewSize > MaxNewSize) {
+ FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
+ }
+ } else if (MaxNewSize < NewSize) {
+ FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
+ } else if (!is_aligned(MaxNewSize, GenAlignment)) {
+ FLAG_SET_ERGO(size_t, MaxNewSize, align_down(MaxNewSize, GenAlignment));
+ }
+ }
+
+ if (NewSize > MaxNewSize) {
+ // At this point this should only happen if the user specifies a large NewSize and/or
+ // a small (but not too small) MaxNewSize.
+ if (FLAG_IS_CMDLINE(MaxNewSize)) {
+ log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
+ "A new max generation size of " SIZE_FORMAT "k will be used.",
+ NewSize/K, MaxNewSize/K, NewSize/K);
+ }
+ FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
+ }
+
+ if (SurvivorRatio < 1 || NewRatio < 1) {
+ vm_exit_during_initialization("Invalid young gen ratio specified");
+ }
+
+ if (OldSize < old_gen_size_lower_bound()) {
+ FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
+ }
+ if (!is_aligned(OldSize, GenAlignment)) {
+ FLAG_SET_ERGO(size_t, OldSize, align_down(OldSize, GenAlignment));
+ }
+
+ if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
+ // NewRatio will be used later to set the young generation size so we use
+ // it to calculate how big the heap should be based on the requested OldSize
+ // and NewRatio.
+ assert(NewRatio > 0, "NewRatio should have been set up earlier");
+ size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
+
+ calculated_heapsize = align_up(calculated_heapsize, HeapAlignment);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
+ }
+
+ // Adjust NewSize and OldSize or MaxHeapSize to match each other
+ if (NewSize + OldSize > MaxHeapSize) {
+ if (FLAG_IS_CMDLINE(MaxHeapSize)) {
+ // Somebody has set a maximum heap size with the intention that we should not
+ // exceed it. Adjust New/OldSize as necessary.
+ size_t calculated_size = NewSize + OldSize;
+ double shrink_factor = (double) MaxHeapSize / calculated_size;
+ size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment);
+ FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
+
+ // OldSize is already aligned because above we aligned MaxHeapSize to
+ // HeapAlignment, and we just made sure that NewSize is aligned to
+ // GenAlignment. In initialize_flags() we verified that HeapAlignment
+ // is a multiple of GenAlignment.
+ FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
+ } else {
+ FLAG_SET_ERGO(size_t, MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
+ }
+ }
+
+ // Update NewSize, if possible, to avoid sizing the young gen too small when only
+ // OldSize is set on the command line.
+ if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
+ if (OldSize < InitialHeapSize) {
+ size_t new_size = InitialHeapSize - OldSize;
+ if (new_size >= MinNewSize && new_size <= MaxNewSize) {
+ FLAG_SET_ERGO(size_t, NewSize, new_size);
+ }
+ }
+ }
+
+ always_do_update_barrier = UseConcMarkSweepGC;
+
+ DEBUG_ONLY(assert_flags();)
+}
+
+// Values explicitly set on the command line win over any
+// ergonomically chosen values for the same parameters.
+// Ergonomic choice of parameters are done before this
+// method is called. Values for command line parameters such as NewSize
+// and MaxNewSize feed those ergonomic choices into this method.
+// This method makes the final generation sizings consistent with
+// themselves and with overall heap sizings.
+// In the absence of explicitly set command line flags, policies
+// such as the use of NewRatio are used to size the generation.
+
+// Minimum sizes of the generations may be different than
+// the initial sizes. An inconsistency is permitted here
+// in the total size that can be specified explicitly by
+// command line specification of OldSize and NewSize and
+// also a command line specification of -Xms. Issue a warning
+// but allow the values to pass.
+void GenArguments::initialize_size_info() {
+ GCArguments::initialize_size_info();
+
+ size_t max_young_size = MaxNewSize;
+
+ // Determine maximum size of the young generation.
+
+ if (FLAG_IS_DEFAULT(MaxNewSize)) {
+ max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, GenAlignment);
+ // Bound the maximum size by NewSize below (since it historically
+ // would have been NewSize and because the NewRatio calculation could
+ // yield a size that is too small) and bound it by MaxNewSize above.
+ // Ergonomics plays here by previously calculating the desired
+ // NewSize and MaxNewSize.
+ max_young_size = MIN2(MAX2(max_young_size, NewSize), MaxNewSize);
+ }
+
+ // Given the maximum young size, determine the initial and
+ // minimum young sizes.
+ size_t initial_young_size = NewSize;
+
+ if (MaxHeapSize == InitialHeapSize) {
+ // The maximum and initial heap sizes are the same so the generation's
+ // initial size must be the same as its maximum size. Use NewSize as the
+ // size if set on command line.
+ max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : max_young_size;
+ initial_young_size = max_young_size;
+
+ // Also update the minimum size if min == initial == max.
+ if (MaxHeapSize == MinHeapSize) {
+ MinNewSize = max_young_size;
+ }
+ } else {
+ if (FLAG_IS_CMDLINE(NewSize)) {
+ // If NewSize is set on the command line, we should use it as
+ // the initial size, but make sure it is within the heap bounds.
+ initial_young_size =
+ MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
+ MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, GenAlignment);
+ } else {
+ // For the case where NewSize is not set on the command line, use
+ // NewRatio to size the initial generation size. Use the current
+ // NewSize as the floor, because if NewRatio is overly large, the resulting
+ // size can be too small.
+ initial_young_size =
+ MIN2(max_young_size, MAX2(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize));
+ }
+ }
+
+ log_trace(gc, heap)("1: Minimum young " SIZE_FORMAT " Initial young " SIZE_FORMAT " Maximum young " SIZE_FORMAT,
+ MinNewSize, initial_young_size, max_young_size);
+
+ // At this point the minimum, initial and maximum sizes
+ // of the overall heap and of the young generation have been determined.
+ // The maximum old size can be determined from the maximum young
+ // and maximum heap size since no explicit flags exist
+ // for setting the old generation maximum.
+ MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment);
+
+ size_t initial_old_size = OldSize;
+
+ // If no explicit command line flag has been set for the
+ // old generation size, use what is left.
+ if (!FLAG_IS_CMDLINE(OldSize)) {
+ // The user has not specified any value but the ergonomics
+ // may have chosen a value (which may or may not be consistent
+ // with the overall heap size). In either case make
+ // the minimum, maximum and initial sizes consistent
+ // with the young sizes and the overall heap sizes.
+ MinOldSize = GenAlignment;
+ initial_old_size = MIN2(MaxOldSize, MAX2(InitialHeapSize - initial_young_size, MinOldSize));
+ // MaxOldSize has already been made consistent above.
+ } else {
+ // OldSize has been explicitly set on the command line. Use it
+ // for the initial size but make sure the minimum allows a young
+ // generation to fit as well.
+ // If the user has explicitly set an OldSize that is inconsistent
+ // with other command line flags, issue a warning.
+ // The generation minimums and the overall heap minimum should
+ // be within one generation alignment.
+ if (initial_old_size > MaxOldSize) {
+ log_warning(gc, ergo)("Inconsistency between maximum heap size and maximum "
+ "generation sizes: using maximum heap = " SIZE_FORMAT
+ ", -XX:OldSize flag is being ignored",
+ MaxHeapSize);
+ initial_old_size = MaxOldSize;
+ }
+
+ MinOldSize = MIN2(initial_old_size, MinHeapSize - MinNewSize);
+ }
+
+ // The initial generation sizes should match the initial heap size,
+ // if not issue a warning and resize the generations. This behavior
+ // differs from JDK8 where the generation sizes have higher priority
+ // than the initial heap size.
+ if ((initial_old_size + initial_young_size) != InitialHeapSize) {
+ log_warning(gc, ergo)("Inconsistency between generation sizes and heap size, resizing "
+ "the generations to fit the heap.");
+
+ size_t desired_young_size = InitialHeapSize - initial_old_size;
+ if (InitialHeapSize < initial_old_size) {
+ // Old gen wants all the memory, use the minimum for young and the rest for old
+ initial_young_size = MinNewSize;
+ initial_old_size = InitialHeapSize - MinNewSize;
+ } else if (desired_young_size > max_young_size) {
+ // Need to increase both young and old generation
+ initial_young_size = max_young_size;
+ initial_old_size = InitialHeapSize - max_young_size;
+ } else if (desired_young_size < MinNewSize) {
+ // Need to decrease both young and old generation
+ initial_young_size = MinNewSize;
+ initial_old_size = InitialHeapSize - MinNewSize;
+ } else {
+ // The young generation boundaries allow us to only update the
+ // young generation.
+ initial_young_size = desired_young_size;
+ }
+
+ log_trace(gc, heap)("2: Minimum young " SIZE_FORMAT " Initial young " SIZE_FORMAT " Maximum young " SIZE_FORMAT,
+ MinNewSize, initial_young_size, max_young_size);
+ }
+
+ // Write back to flags if necessary.
+ if (NewSize != initial_young_size) {
+ FLAG_SET_ERGO(size_t, NewSize, initial_young_size);
+ }
+
+ if (MaxNewSize != max_young_size) {
+ FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+ }
+
+ if (OldSize != initial_old_size) {
+ FLAG_SET_ERGO(size_t, OldSize, initial_old_size);
+ }
+
+ log_trace(gc, heap)("Minimum old " SIZE_FORMAT " Initial old " SIZE_FORMAT " Maximum old " SIZE_FORMAT,
+ MinOldSize, OldSize, MaxOldSize);
+
+ DEBUG_ONLY(assert_size_info();)
+}
+
+#ifdef ASSERT
+void GenArguments::assert_flags() {
+ GCArguments::assert_flags();
+ assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size");
+ assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
+ assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
+ assert(NewSize % GenAlignment == 0, "NewSize alignment");
+ assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
+ assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
+ assert(OldSize % GenAlignment == 0, "OldSize alignment");
+}
+
+void GenArguments::assert_size_info() {
+ GCArguments::assert_size_info();
+ // GenArguments::initialize_size_info may update the MaxNewSize
+ assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
+ assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes");
+ assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
+ assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment");
+ assert(NewSize % GenAlignment == 0, "_initial_young_size alignment");
+ assert(MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
+ assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, GenAlignment),
+ "Ergonomics made minimum young generation larger than minimum heap");
+ assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment),
+ "Ergonomics made initial young generation larger than initial heap");
+ assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, GenAlignment),
+ "Ergonomics made maximum young generation lager than maximum heap");
+ assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes");
+ assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes");
+ assert(MaxOldSize % GenAlignment == 0, "MaxOldSize alignment");
+ assert(OldSize % GenAlignment == 0, "OldSize alignment");
+ assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes");
+ assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size");
+ assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size");
+ assert(MaxNewSize + MaxOldSize == MaxHeapSize, "Maximum generation sizes should match maximum heap size");
+}
+#endif // ASSERT
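Two of the NewRatio computations above are easier to follow with numbers: scale_by_NewRatio_aligned() carves the young generation out of a heap size, and the OldSize-only path derives the whole heap from OldSize. The sketch below uses assumed alignments (GenAlignment 64 KiB, HeapAlignment 2 MiB); the real code uses align_down_bounded(), which additionally never returns less than one alignment unit:

// Worked example of the NewRatio arithmetic, not part of the patch. GenAlignment
// and HeapAlignment are assumed values; the real ones come from Generation::GenGrain
// and compute_heap_alignment().
#include <cstdint>
#include <iostream>

static uint64_t align_down(uint64_t size, uint64_t alignment) {
  return size & ~(alignment - 1);   // power-of-two alignment assumed
}

static uint64_t align_up(uint64_t size, uint64_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uint64_t KiB = 1024, MiB = KiB * KiB;
  const uint64_t gen_alignment  = 64 * KiB;
  const uint64_t heap_alignment = 2 * MiB;
  const uint64_t new_ratio      = 2;   // default: young gen is 1/(NewRatio+1) of the heap

  // scale_by_NewRatio_aligned(): young gen share of a 100 MiB heap.
  const uint64_t young = align_down(100 * MiB / (new_ratio + 1), gen_alignment);
  std::cout << young << "\n";                      // 34930688 bytes (533 * 64 KiB)

  // Only -XX:OldSize=300m on the command line: size the heap so OldSize keeps the ratio.
  const uint64_t old_size = 300 * MiB;
  const uint64_t heap = align_up((old_size / new_ratio) * (new_ratio + 1), heap_alignment);
  std::cout << heap / MiB << " MiB\n";             // 450 MiB
  return 0;
}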
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/genArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_GENARGUMENTS_HPP
+#define SHARE_GC_SHARED_GENARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+#include "utilities/debug.hpp"
+
+extern size_t MinNewSize;
+
+extern size_t MinOldSize;
+extern size_t MaxOldSize;
+
+extern size_t GenAlignment;
+
+class GenArguments : public GCArguments {
+ friend class TestGenCollectorPolicy; // Testing
+private:
+ virtual void initialize_alignments();
+ virtual void initialize_size_info();
+
+ // Return the (conservative) maximum heap alignment
+ virtual size_t conservative_max_heap_alignment();
+
+ DEBUG_ONLY(void assert_flags();)
+ DEBUG_ONLY(void assert_size_info();)
+
+ static size_t scale_by_NewRatio_aligned(size_t base_size, size_t alignment);
+
+protected:
+ virtual void initialize_heap_flags_and_sizes();
+};
+
+#endif // SHARE_GC_SHARED_GENARGUMENTS_HPP
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -42,6 +42,7 @@
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
@@ -73,21 +74,19 @@
#include "jvmci/jvmci.hpp"
#endif
-GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy,
- Generation::Name young,
+GenCollectedHeap::GenCollectedHeap(Generation::Name young,
Generation::Name old,
const char* policy_counters_name) :
CollectedHeap(),
_young_gen_spec(new GenerationSpec(young,
- policy->initial_young_size(),
- policy->max_young_size(),
- policy->gen_alignment())),
+ NewSize,
+ MaxNewSize,
+ GenAlignment)),
_old_gen_spec(new GenerationSpec(old,
- policy->initial_old_size(),
- policy->max_old_size(),
- policy->gen_alignment())),
+ OldSize,
+ MaxOldSize,
+ GenAlignment)),
_rem_set(NULL),
- _gen_policy(policy),
_soft_ref_gen_policy(),
_gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
_full_collections_completed(0),
@@ -107,9 +106,7 @@
char* heap_address;
ReservedSpace heap_rs;
- size_t heap_alignment = collector_policy()->heap_alignment();
-
- heap_address = allocate(heap_alignment, &heap_rs);
+ heap_address = allocate(HeapAlignment, &heap_rs);
if (!heap_rs.is_reserved()) {
vm_shutdown_during_initialization(
@@ -170,7 +167,7 @@
*heap_rs = Universe::reserve_heap(total_reserved, alignment);
os::trace_page_sizes("Heap",
- collector_policy()->min_heap_byte_size(),
+ MinHeapSize,
total_reserved,
alignment,
heap_rs->base(),
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -26,12 +26,12 @@
#define SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/softRefGenPolicy.hpp"
class AdaptiveSizePolicy;
+class CardTableRS;
class GCPolicyCounters;
class GenerationSpec;
class StrongRootsScope;
@@ -41,7 +41,6 @@
// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
- friend class GenCollectorPolicy;
friend class Generation;
friend class DefNewGeneration;
friend class TenuredGeneration;
@@ -75,9 +74,6 @@
// The singleton CardTable Remembered Set.
CardTableRS* _rem_set;
- // The generational collector policy.
- GenCollectorPolicy* _gen_policy;
-
SoftRefGenPolicy _soft_ref_gen_policy;
// The sizing of the heap is controlled by a sizing policy.
@@ -159,8 +155,7 @@
// we absolutely __must__ clear soft refs?
bool must_clear_all_soft_refs();
- GenCollectedHeap(GenCollectorPolicy *policy,
- Generation::Name young,
+ GenCollectedHeap(Generation::Name young,
Generation::Name old,
const char* policy_counters_name);
@@ -186,11 +181,6 @@
GenerationSpec* young_gen_spec() const;
GenerationSpec* old_gen_spec() const;
- // The generational collector policy.
- GenCollectorPolicy* gen_policy() const { return _gen_policy; }
-
- virtual CollectorPolicy* collector_policy() const { return gen_policy(); }
-
virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_gen_policy; }
// Adaptive size policy
@@ -201,11 +191,6 @@
// Performance Counter support
GCPolicyCounters* counters() { return _gc_policy_counters; }
- // Return the (conservative) maximum heap alignment
- static size_t conservative_max_heap_alignment() {
- return Generation::GenGrain;
- }
-
size_t capacity() const;
size_t used() const;
--- a/src/hotspot/share/gc/shared/generation.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -33,6 +33,7 @@
#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
+#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
--- a/src/hotspot/share/gc/shared/generationSpec.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -42,31 +42,18 @@
switch (name()) {
#if INCLUDE_SERIALGC
case Generation::DefNew:
- return new DefNewGeneration(rs, init_size());
+ return new DefNewGeneration(rs, _init_size, _min_size, _max_size);
case Generation::MarkSweepCompact:
- return new TenuredGeneration(rs, init_size(), remset);
+ return new TenuredGeneration(rs, _init_size, _min_size, _max_size, remset);
#endif
#if INCLUDE_CMSGC
case Generation::ParNew:
- return new ParNewGeneration(rs, init_size());
+ return new ParNewGeneration(rs, _init_size, _min_size, _max_size);
case Generation::ConcurrentMarkSweep: {
- assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
- if (remset == NULL) {
- vm_exit_during_initialization("Rem set incompatibility.");
- }
- // Otherwise
- // The constructor creates the CMSCollector if needed,
- // else registers with an existing CMSCollector
-
- ConcurrentMarkSweepGeneration* g = NULL;
- g = new ConcurrentMarkSweepGeneration(rs, init_size(), remset);
-
- g->initialize_performance_counters();
-
- return g;
+ return new ConcurrentMarkSweepGeneration(rs, _init_size, _min_size, _max_size, remset);
}
#endif // INCLUDE_CMSGC
--- a/src/hotspot/share/gc/shared/generationSpec.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/generationSpec.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -37,6 +37,7 @@
private:
Generation::Name _name;
size_t _init_size;
+ size_t _min_size;
size_t _max_size;
public:
@@ -48,12 +49,10 @@
Generation* init(ReservedSpace rs, CardTableRS* remset);
- // Accessors
- Generation::Name name() const { return _name; }
- size_t init_size() const { return _init_size; }
- void set_init_size(size_t size) { _init_size = size; }
- size_t max_size() const { return _max_size; }
- void set_max_size(size_t size) { _max_size = size; }
+ Generation::Name name() const { return _name; }
+ size_t init_size() const { return _init_size; }
+ size_t min_size() const { return _min_size; }
+ size_t max_size() const { return _max_size; }
};
typedef GenerationSpec* GenerationSpecPtr;
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,8 +23,9 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcConfig.hpp"
#include "gc/shared/jvmFlagConstraintsGC.hpp"
#include "gc/shared/plab.hpp"
@@ -313,12 +314,12 @@
#if INCLUDE_G1GC
if (UseG1GC) {
- // For G1 GC, we don't know until G1CollectorPolicy is created.
+ // For G1 GC, we don't know until G1CollectedHeap is created.
heap_alignment = MaxSizeForHeapAlignmentG1();
} else
#endif
{
- heap_alignment = CollectorPolicy::compute_heap_alignment();
+ heap_alignment = GCArguments::compute_heap_alignment();
}
return MaxSizeForAlignment(name, value, heap_alignment, verbose);
@@ -422,12 +423,12 @@
JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
if (FLAG_IS_CMDLINE(SurvivorRatio) &&
- (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
+ (value > (MaxHeapSize / SpaceAlignment))) {
JVMFlag::printError(verbose,
"SurvivorRatio (" UINTX_FORMAT ") must be "
"less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
value,
- (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
+ (MaxHeapSize / SpaceAlignment));
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
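
A quick numeric check of the SurvivorRatio constraint as rewritten above: the flag is rejected once it exceeds MaxHeapSize / SpaceAlignment. The sketch below (not part of the patch) just evaluates that bound for made-up flag values, a 512M heap and a 64K space alignment:

    // Upper bound for SurvivorRatio per the constraint above:
    // value must satisfy value <= MaxHeapSize / SpaceAlignment.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t M = 1024 * 1024;
      uint64_t max_heap_size   = 512 * M;    // hypothetical -Xmx512m
      uint64_t space_alignment = 64 * 1024;  // hypothetical SpaceAlignment
      uint64_t max_ratio = max_heap_size / space_alignment;  // 8192
      std::printf("SurvivorRatio must be <= %llu\n",
                  (unsigned long long)max_ratio);
      return 0;
    }
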
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shenandoah/shenandoahArguments.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
@@ -198,6 +198,19 @@
return align;
}
+void ShenandoahArguments::initialize_alignments() {
+ // Need to set up sizes early to get correct alignments.
+ ShenandoahHeapRegion::setup_sizes(InitialHeapSize, MaxHeapSize);
+
+ // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
+ size_t align = ShenandoahHeapRegion::region_size_bytes();
+ if (UseLargePages) {
+ align = MAX2(align, os::large_page_size());
+ }
+ SpaceAlignment = align;
+ HeapAlignment = align;
+}
+
CollectedHeap* ShenandoahArguments::create_heap() {
- return create_heap_with_policy<ShenandoahHeap, ShenandoahCollectorPolicy>();
+ return new ShenandoahHeap(new ShenandoahCollectorPolicy());
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -30,11 +30,11 @@
class CollectedHeap;
class ShenandoahArguments : public GCArguments {
-public:
- virtual void initialize();
+private:
+ virtual void initialize_alignments();
+ virtual void initialize();
virtual size_t conservative_max_heap_alignment();
-
virtual CollectedHeap* create_heap();
};
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -42,24 +42,10 @@
Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);
- ShenandoahHeapRegion::setup_sizes(initial_heap_byte_size(), max_heap_byte_size());
-
- initialize_all();
-
_tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
}
-void ShenandoahCollectorPolicy::initialize_alignments() {
- // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
- size_t align = ShenandoahHeapRegion::region_size_bytes();
- if (UseLargePages) {
- align = MAX2(align, os::large_page_size());
- }
- _space_alignment = align;
- _heap_alignment = align;
-}
-
void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
_explicit_concurrent++;
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,12 +24,12 @@
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahTracer.hpp"
+#include "memory/allocation.hpp"
#include "utilities/ostream.hpp"
-class ShenandoahCollectorPolicy: public CollectorPolicy {
+class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
private:
size_t _success_concurrent_gcs;
size_t _success_degenerated_gcs;
@@ -52,8 +52,6 @@
public:
ShenandoahCollectorPolicy();
- void initialize_alignments();
-
// TODO: This is different from gc_end: that one encompasses one VM operation.
// These two encompass the entire cycle.
void record_cycle_start();
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "memory/allocation.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/memAllocator.hpp"
@@ -66,6 +67,7 @@
#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
#include "memory/metaspace.hpp"
+#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
@@ -139,10 +141,10 @@
// Figure out heap sizing
//
- size_t init_byte_size = collector_policy()->initial_heap_byte_size();
- size_t min_byte_size = collector_policy()->min_heap_byte_size();
- size_t max_byte_size = collector_policy()->max_heap_byte_size();
- size_t heap_alignment = collector_policy()->heap_alignment();
+ size_t init_byte_size = InitialHeapSize;
+ size_t min_byte_size = MinHeapSize;
+ size_t max_byte_size = MaxHeapSize;
+ size_t heap_alignment = HeapAlignment;
size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
@@ -1159,10 +1161,6 @@
//assert(false, "Shouldn't need to do full collections");
}
-CollectorPolicy* ShenandoahHeap::collector_policy() const {
- return _shenandoah_policy;
-}
-
HeapWord* ShenandoahHeap::block_start(const void* addr) const {
Space* sp = heap_region_containing(addr);
if (sp != NULL) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -479,7 +479,6 @@
MemoryUsage memory_usage();
GCTracer* tracer();
GCTimer* gc_timer() const;
- CollectorPolicy* collector_policy() const;
// ---------- Reference processing
//
--- a/src/hotspot/share/gc/z/zArguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/z/zArguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -24,12 +24,16 @@
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zCollectedHeap.hpp"
-#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zWorkers.hpp"
-#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/gcArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
+void ZArguments::initialize_alignments() {
+ SpaceAlignment = ZGranuleSize;
+ HeapAlignment = SpaceAlignment;
+}
+
void ZArguments::initialize() {
GCArguments::initialize();
@@ -101,5 +105,5 @@
}
CollectedHeap* ZArguments::create_heap() {
- return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
+ return new ZCollectedHeap();
}
--- a/src/hotspot/share/gc/z/zArguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/z/zArguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -32,7 +32,8 @@
private:
void initialize_platform();
-public:
+ virtual void initialize_alignments();
+
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -40,8 +40,7 @@
return (ZCollectedHeap*)heap;
}
-ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
- _collector_policy(policy),
+ZCollectedHeap::ZCollectedHeap() :
_soft_ref_policy(),
_barrier_set(),
_initialize(&_barrier_set),
@@ -80,10 +79,6 @@
_stat->stop();
}
-CollectorPolicy* ZCollectedHeap::collector_policy() const {
- return _collector_policy;
-}
-
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -27,7 +27,6 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/z/zBarrierSet.hpp"
-#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zInitialize.hpp"
@@ -39,7 +38,6 @@
friend class VMStructs;
private:
- ZCollectorPolicy* _collector_policy;
SoftRefPolicy _soft_ref_policy;
ZBarrierSet _barrier_set;
ZInitialize _initialize;
@@ -56,14 +54,13 @@
public:
static ZCollectedHeap* heap();
- ZCollectedHeap(ZCollectorPolicy* policy);
+ ZCollectedHeap();
virtual Name kind() const;
virtual const char* name() const;
virtual jint initialize();
virtual void initialize_serviceability();
virtual void stop();
- virtual CollectorPolicy* collector_policy() const;
virtual SoftRefPolicy* soft_ref_policy();
virtual size_t max_capacity() const;
--- a/src/hotspot/share/gc/z/zCollectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/z/zCollectorPolicy.hpp"
-#include "gc/z/zGlobals.hpp"
-
-void ZCollectorPolicy::initialize_alignments() {
- _space_alignment = ZGranuleSize;
- _heap_alignment = _space_alignment;
-}
--- a/src/hotspot/share/gc/z/zCollectorPolicy.hpp Thu May 02 10:38:00 2019 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
-#define SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
-
-#include "gc/shared/collectorPolicy.hpp"
-
-class ZCollectorPolicy : public CollectorPolicy {
-public:
- virtual void initialize_alignments();
-};
-
-#endif // SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
--- a/src/hotspot/share/memory/universe.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/memory/universe.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -663,11 +663,16 @@
initialize_global_behaviours();
+ GCConfig::arguments()->initialize_heap_sizes();
+
jint status = Universe::initialize_heap();
if (status != JNI_OK) {
return status;
}
+ Universe::initialize_compressed_oops();
+ Universe::initialize_tlab();
+
SystemDictionary::initialize_oop_storage();
Metaspace::global_initialize();
@@ -725,9 +730,16 @@
return JNI_OK;
}
-CollectedHeap* Universe::create_heap() {
+jint Universe::initialize_heap() {
assert(_collectedHeap == NULL, "Heap already created");
- return GCConfig::arguments()->create_heap();
+ _collectedHeap = GCConfig::arguments()->create_heap();
+ jint status = _collectedHeap->initialize();
+
+ if (status == JNI_OK) {
+ log_info(gc)("Using %s", _collectedHeap->name());
+ }
+
+ return status;
}
// Choose the heap base address and oop encoding mode
@@ -737,17 +749,7 @@
// ZeroBased - Use zero based compressed oops with encoding when
// NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.
-
-jint Universe::initialize_heap() {
- _collectedHeap = create_heap();
- jint status = _collectedHeap->initialize();
- if (status != JNI_OK) {
- return status;
- }
- log_info(gc)("Using %s", _collectedHeap->name());
-
- ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
-
+void Universe::initialize_compressed_oops() {
#ifdef _LP64
if (UseCompressedOops) {
// Subtract a page because something can get allocated at heap base.
@@ -787,16 +789,15 @@
assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
Universe::narrow_oop_shift() == 0, "invalid value");
#endif
+}
- // We will never reach the CATCH below since Exceptions::_throw will cause
- // the VM to exit if an exception is thrown during initialization
-
+void Universe::initialize_tlab() {
+ ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
if (UseTLAB) {
assert(Universe::heap()->supports_tlab_allocation(),
"Should support thread-local allocation buffers");
ThreadLocalAllocBuffer::startup_initialization();
}
- return JNI_OK;
}
void Universe::print_compressed_oops_mode(outputStream* st) {
--- a/src/hotspot/share/memory/universe.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/memory/universe.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -213,8 +213,9 @@
static size_t _heap_capacity_at_last_gc;
static size_t _heap_used_at_last_gc;
- static CollectedHeap* create_heap();
static jint initialize_heap();
+ static void initialize_compressed_oops();
+ static void initialize_tlab();
static void initialize_basic_type_mirrors(TRAPS);
static void fixup_mirrors(TRAPS);
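
The universe.cpp and universe.hpp hunks above split heap startup into four ordered steps: flag-based sizing, heap creation/initialization, compressed-oops setup, and TLAB setup. A minimal standalone sketch of that ordering, with stand-in bodies (only the step names and their order mirror the patch; the asserts and output are illustrative assumptions):

    // Toy model of the reordered startup path in universe_init().
    #include <cassert>
    #include <cstdio>

    static bool sizes_initialized = false;
    static bool heap_initialized  = false;

    static void initialize_heap_sizes() {      // GCConfig::arguments()->initialize_heap_sizes()
      sizes_initialized = true;                // MinHeapSize/InitialHeapSize/MaxHeapSize settled here
    }

    static int initialize_heap() {             // Universe::initialize_heap()
      assert(sizes_initialized);               // create_heap() relies on the sizing flags
      heap_initialized = true;
      std::puts("Using Toy GC");               // mirrors log_info(gc)("Using %s", ...)
      return 0;                                // JNI_OK
    }

    static void initialize_compressed_oops() { assert(heap_initialized); }  // split out of initialize_heap()
    static void initialize_tlab()            { assert(heap_initialized); }  // max_tlab_size() needs a live heap

    int main() {
      initialize_heap_sizes();
      if (initialize_heap() != 0) return 1;
      initialize_compressed_oops();
      initialize_tlab();
      return 0;
    }
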
--- a/src/hotspot/share/prims/whitebox.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -34,6 +34,7 @@
#include "compiler/methodMatcher.hpp"
#include "compiler/directivesParser.hpp"
#include "gc/shared/gcConfig.hpp"
+#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/heapShared.inline.hpp"
@@ -79,6 +80,7 @@
#include "prims/cdsoffsets.hpp"
#endif // INCLUDE_CDS
#if INCLUDE_G1GC
+#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.hpp"
@@ -222,11 +224,13 @@
WB_END
WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
- CollectorPolicy * p = Universe::heap()->collector_policy();
- tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
- SIZE_FORMAT " Maximum heap " SIZE_FORMAT " Space alignment " SIZE_FORMAT " Heap alignment " SIZE_FORMAT,
- p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
- p->space_alignment(), p->heap_alignment());
+ tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " SIZE_FORMAT " "
+ "Maximum heap " SIZE_FORMAT " Space alignment " SIZE_FORMAT " Heap alignment " SIZE_FORMAT,
+ MinHeapSize,
+ InitialHeapSize,
+ MaxHeapSize,
+ SpaceAlignment,
+ HeapAlignment);
}
WB_END
@@ -381,13 +385,11 @@
WB_END
WB_ENTRY(jlong, WB_GetHeapSpaceAlignment(JNIEnv* env, jobject o))
- size_t alignment = Universe::heap()->collector_policy()->space_alignment();
- return (jlong)alignment;
+ return (jlong)SpaceAlignment;
WB_END
WB_ENTRY(jlong, WB_GetHeapAlignment(JNIEnv* env, jobject o))
- size_t alignment = Universe::heap()->collector_policy()->heap_alignment();
- return (jlong)alignment;
+ return (jlong)HeapAlignment;
WB_END
WB_ENTRY(jboolean, WB_SupportsConcurrentGCPhaseControl(JNIEnv* env, jobject o))
@@ -513,7 +515,7 @@
uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_dram();
return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
} else {
- return (jlong)g1h->base() + g1h->collector_policy()->max_heap_byte_size();
+ return (jlong)g1h->base() + G1Arguments::heap_reserved_size_bytes();
}
}
#endif // INCLUDE_G1GC
@@ -596,7 +598,7 @@
WB_ENTRY(jlong, WB_PSHeapGenerationAlignment(JNIEnv* env, jobject o))
if (UseParallelGC) {
- return ParallelScavengeHeap::heap()->generation_alignment();
+ return GenAlignment;
}
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSHeapGenerationAlignment: Parallel GC is not enabled");
WB_END
--- a/src/hotspot/share/runtime/arguments.cpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/runtime/arguments.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -29,7 +29,6 @@
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
-#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcConfig.hpp"
#include "logging/log.hpp"
@@ -78,7 +77,6 @@
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
size_t Arguments::_conservative_max_heap_alignment = 0;
-size_t Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
@@ -1673,13 +1671,12 @@
void Arguments::set_conservative_max_heap_alignment() {
// The conservative maximum required alignment for the heap is the maximum of
// the alignments imposed by several sources: any requirements from the heap
- // itself, the collector policy and the maximum page size we may run the VM
- // with.
+ // itself and the maximum page size we may run the VM with.
size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
_conservative_max_heap_alignment = MAX4(heap_alignment,
(size_t)os::vm_allocation_granularity(),
os::max_page_size(),
- CollectorPolicy::compute_heap_alignment());
+ GCArguments::compute_heap_alignment());
}
jint Arguments::set_ergonomics_flags() {
@@ -1792,7 +1789,7 @@
// If the minimum or initial heap_size have not been set or requested to be set
// ergonomically, set them accordingly.
- if (InitialHeapSize == 0 || min_heap_size() == 0) {
+ if (InitialHeapSize == 0 || MinHeapSize == 0) {
julong reasonable_minimum = (julong)(OldSize + NewSize);
reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize);
@@ -1802,7 +1799,7 @@
if (InitialHeapSize == 0) {
julong reasonable_initial = (julong)((phys_mem * InitialRAMPercentage) / 100);
- reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)min_heap_size());
+ reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)MinHeapSize);
reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
@@ -1812,9 +1809,9 @@
}
// If the minimum heap size has not been set (via -Xms),
// synchronize with InitialHeapSize to avoid errors with the default value.
- if (min_heap_size() == 0) {
- set_min_heap_size(MIN2((size_t)reasonable_minimum, InitialHeapSize));
- log_trace(gc, heap)(" Minimum heap size " SIZE_FORMAT, min_heap_size());
+ if (MinHeapSize == 0) {
+ MinHeapSize = MIN2((size_t)reasonable_minimum, InitialHeapSize);
+ log_trace(gc, heap)(" Minimum heap size " SIZE_FORMAT, MinHeapSize);
}
}
}
@@ -1856,7 +1853,7 @@
return JNI_EINVAL;
}
// Currently the minimum size and the initial heap sizes are the same.
- set_min_heap_size(initHeapSize);
+ MinHeapSize = initHeapSize;
}
if (FLAG_IS_DEFAULT(NewSize)) {
// Make the young generation 3/8ths of the total heap.
@@ -2556,7 +2553,7 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- set_min_heap_size((size_t)long_initial_heap_size);
+ MinHeapSize = (size_t)long_initial_heap_size;
// Currently the minimum size and the initial heap sizes are the same.
// Can be overridden with -XX:InitialHeapSize.
if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, (size_t)long_initial_heap_size) != JVMFlag::SUCCESS) {
--- a/src/hotspot/share/runtime/arguments.hpp Thu May 02 10:38:00 2019 +0200
+++ b/src/hotspot/share/runtime/arguments.hpp Mon Apr 15 11:47:46 2019 +0200
@@ -333,8 +333,6 @@
// Value of the conservative maximum heap alignment needed
static size_t _conservative_max_heap_alignment;
- static size_t _min_heap_size;
-
// -Xrun arguments
static AgentLibraryList _libraryList;
static void add_init_library(const char* name, char* options);
@@ -548,10 +546,6 @@
// -Dsun.java.launcher.pid
static int sun_java_launcher_pid() { return _sun_java_launcher_pid; }
- // -Xms
- static size_t min_heap_size() { return _min_heap_size; }
- static void set_min_heap_size(size_t v) { _min_heap_size = v; }
-
// -Xrun
static AgentLibrary* libraries() { return _libraryList.first(); }
static bool init_libraries_at_startup() { return !_libraryList.is_empty(); }
--- a/test/hotspot/gtest/gc/shared/test_collectorPolicy.cpp Thu May 02 10:38:00 2019 +0200
+++ b/test/hotspot/gtest/gc/shared/test_collectorPolicy.cpp Mon Apr 15 11:47:46 2019 +0200
@@ -22,7 +22,7 @@
*/
#include "precompiled.hpp"
-#include "gc/shared/collectorPolicy.hpp"
+#include "gc/serial/serialArguments.hpp"
#include "runtime/arguments.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
@@ -54,32 +54,22 @@
BinaryExecutor(size_t val1, size_t val2) : param1(val1), param2(val2) { }
};
- class MinHeapSizeGuard {
- private:
- const size_t _stored_min_heap_size;
- public:
- MinHeapSizeGuard() : _stored_min_heap_size(Arguments::min_heap_size()) { }
- ~MinHeapSizeGuard() {
- Arguments::set_min_heap_size(_stored_min_heap_size);
- }
- };
-
class TestWrapper {
public:
static void test(Executor* setter1, Executor* setter2, Executor* checker) {
+ FLAG_GUARD(MinHeapSize);
FLAG_GUARD(InitialHeapSize);
FLAG_GUARD(MaxHeapSize);
FLAG_GUARD(MaxNewSize);
FLAG_GUARD(MinHeapDeltaBytes);
FLAG_GUARD(NewSize);
FLAG_GUARD(OldSize);
- MinHeapSizeGuard min_heap_size_guard;
+ MinHeapSize = 40 * M;
FLAG_SET_ERGO(size_t, InitialHeapSize, 100 * M);
FLAG_SET_ERGO(size_t, OldSize, 4 * M);
FLAG_SET_ERGO(size_t, NewSize, 1 * M);
FLAG_SET_ERGO(size_t, MaxNewSize, 80 * M);
- Arguments::set_min_heap_size(40 * M);
ASSERT_NO_FATAL_FAILURE(setter1->execute());
@@ -106,27 +96,31 @@
public:
CheckYoungMin(size_t param) : UnaryExecutor(param) { }
void execute() {
- MarkSweepPolicy msp;
- msp.initialize_all();
- ASSERT_LE(msp.min_young_size(), param);
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
+ ASSERT_LE(MinNewSize, param);
}
};
+ static size_t scale_by_NewRatio_aligned(size_t value, size_t alignment) {
+ // Accessible via friend declaration
+ return GenArguments::scale_by_NewRatio_aligned(value, alignment);
+ }
+
class CheckScaledYoungInitial : public Executor {
public:
void execute() {
size_t initial_heap_size = InitialHeapSize;
- MarkSweepPolicy msp;
- msp.initialize_all();
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
if (InitialHeapSize > initial_heap_size) {
- // InitialHeapSize was adapted by msp.initialize_all, e.g. due to alignment
+ // InitialHeapSize was adapted by sa.initialize_heap_sizes, e.g. due to alignment
// caused by 64K page size.
initial_heap_size = InitialHeapSize;
}
- size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
- ASSERT_EQ(expected, msp.initial_young_size());
+ size_t expected = scale_by_NewRatio_aligned(initial_heap_size, GenAlignment);
ASSERT_EQ(expected, NewSize);
}
};
@@ -143,10 +137,10 @@
public:
CheckYoungInitial(size_t param) : UnaryExecutor(param) { }
void execute() {
- MarkSweepPolicy msp;
- msp.initialize_all();
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
- ASSERT_EQ(param, msp.initial_young_size());
+ ASSERT_EQ(param, NewSize);
}
};
@@ -162,7 +156,7 @@
public:
SetMaxNewSizeCmd(size_t param1, size_t param2) : BinaryExecutor(param1, param2) { }
void execute() {
- size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
+ size_t heap_alignment = GCArguments::compute_heap_alignment();
size_t new_size_value = align_up(MaxHeapSize, heap_alignment)
- param1 + param2;
FLAG_SET_CMDLINE(size_t, MaxNewSize, new_size_value);
@@ -173,24 +167,24 @@
public:
CheckOldMin(size_t param) : UnaryExecutor(param) { }
void execute() {
- MarkSweepPolicy msp;
- msp.initialize_all();
- ASSERT_LE(msp.min_old_size(), param);
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
+ ASSERT_LE(MinOldSize, param);
}
};
class CheckOldInitial : public Executor {
public:
void execute() {
- size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
+ size_t heap_alignment = GCArguments::compute_heap_alignment();
- MarkSweepPolicy msp;
- msp.initialize_all();
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
size_t expected_old_initial = align_up(InitialHeapSize, heap_alignment)
- MaxNewSize;
- ASSERT_EQ(expected_old_initial, msp.initial_old_size());
+ ASSERT_EQ(expected_old_initial, OldSize);
}
};
@@ -198,17 +192,17 @@
public:
CheckOldInitialMaxNewSize(size_t param1, size_t param2) : BinaryExecutor(param1, param2) { }
void execute() {
- size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
+ size_t heap_alignment = GCArguments::compute_heap_alignment();
size_t new_size_value = align_up(MaxHeapSize, heap_alignment)
- param1 + param2;
- MarkSweepPolicy msp;
- msp.initialize_all();
+ SerialArguments sa;
+ sa.initialize_heap_sizes();
size_t expected_old_initial = align_up(MaxHeapSize, heap_alignment)
- new_size_value;
- ASSERT_EQ(expected_old_initial, msp.initial_old_size());
+ ASSERT_EQ(expected_old_initial, OldSize);
}
};
};
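
The rewritten CheckScaledYoungInitial test above compares NewSize against GenArguments::scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment). The helper's implementation is not shown in this patch; assuming the historical HotSpot formula (base_size / (NewRatio + 1), aligned down to the generation alignment), a standalone sketch of the expected value looks like this:

    // Illustrative re-implementation of the scaling checked by the test
    // (assumption: NewSize == align_down(base / (NewRatio + 1), GenAlignment)).
    #include <cstdint>
    #include <cstdio>

    static uint64_t align_down(uint64_t size, uint64_t alignment) {
      return size - (size % alignment);
    }

    static uint64_t scale_by_new_ratio_aligned(uint64_t base, uint64_t new_ratio,
                                               uint64_t alignment) {
      return align_down(base / (new_ratio + 1), alignment);
    }

    int main() {
      const uint64_t M = 1024 * 1024;
      // Mirrors the fixture: InitialHeapSize = 100M; NewRatio = 2 (default) and a
      // 64K GenAlignment are made-up but typical values.
      uint64_t expected_new_size = scale_by_new_ratio_aligned(100 * M, 2, 64 * 1024);
      std::printf("expected NewSize = %llu bytes\n",
                  (unsigned long long)expected_new_size);
      return 0;
    }
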