--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp Thu May 03 10:02:32 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp Thu May 03 11:59:58 2018 +0200
@@ -27,9 +27,17 @@
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
-// TODO: Merge it back to header, once JVMCI name clash via barrierSetC1.hpp is resolved
EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
make_barrier_set_assembler<BarrierSetAssembler>(),
make_barrier_set_c1<BarrierSetC1>(),
BarrierSet::FakeRtti(BarrierSet::Epsilon)) {};
+
+void EpsilonBarrierSet::on_thread_create(Thread *thread) {
+ EpsilonThreadLocalData::create(thread);
+}
+
+void EpsilonBarrierSet::on_thread_destroy(Thread *thread) {
+ EpsilonThreadLocalData::destroy(thread);
+}
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp Thu May 03 10:02:32 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp Thu May 03 11:59:58 2018 +0200
@@ -26,7 +26,6 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
-//#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/barrierSet.hpp"
// No interaction with application is required for Epsilon, and therefore
@@ -39,6 +38,9 @@
virtual void print_on(outputStream *st) const {}
+ virtual void on_thread_create(Thread* thread);
+ virtual void on_thread_destroy(Thread* thread);
+
template <DecoratorSet decorators, typename BarrierSetT = EpsilonBarrierSet>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Thu May 03 10:02:32 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Thu May 03 11:59:58 2018 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
@@ -62,9 +63,9 @@
log_info(gc)("Initialized with " SIZE_FORMAT "M non-resizeable heap", init_byte_size / M);
}
if (UseTLAB) {
- log_info(gc)("Using TLAB allocation; min: " SIZE_FORMAT "K, max: " SIZE_FORMAT "K",
- ThreadLocalAllocBuffer::min_size() * HeapWordSize / K,
- _max_tlab_size*HeapWordSize / K);
+ log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K, elasticity %.2f",
+ _max_tlab_size * HeapWordSize / K,
+ EpsilonTLABElasticity);
} else {
log_info(gc)("Not using TLAB allocation");
}
@@ -94,23 +95,9 @@
}
size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
- // This is the only way we can control TLAB sizes without having safepoints.
- // Implement exponential expansion within [MinTLABSize; _max_tlab_size], based
- // on previously "used" TLAB size.
-
- size_t size = MIN2(_max_tlab_size * HeapWordSize, MAX2(MinTLABSize, thr->tlab().used() * HeapWordSize * 2));
-
- if (log_is_enabled(Trace, gc)) {
- ResourceMark rm;
- log_trace(gc)(
- "Selecting TLAB size for \"%s\" (Desired: " SIZE_FORMAT "K, Used: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
- Thread::current()->name(),
- thr->tlab().desired_size() * HeapWordSize / K,
- thr->tlab().used() * HeapWordSize / K,
- size / K);
- }
-
- return size;
+ // Return max allocatable TLAB size in bytes (callers expect bytes, while
+ // _max_tlab_size is in words); let allocation path figure the actual size.
+ return _max_tlab_size * HeapWordSize;
}
EpsilonHeap* EpsilonHeap::heap() {
@@ -164,10 +151,46 @@
}
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
- size_t requested_size,
- size_t* actual_size) {
- // TODO: Handle TLAB sizing here
- return allocate_work(requested_size);
+ size_t requested_size,
+ size_t* actual_size) {
+ size_t ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(Thread::current());
+
+ bool fits = (requested_size <= ergo_tlab);
+
+ // If we can fit the allocation under current TLAB size, do so.
+ // Otherwise, we want to elastically increase the TLAB size.
+ size_t size = fits ? requested_size : (size_t)(ergo_tlab * EpsilonTLABElasticity);
+
+ // Honor boundaries
+ size = MAX2(min_size, MIN2(_max_tlab_size, size));
+
+ if (log_is_enabled(Trace, gc)) {
+ ResourceMark rm;
+ log_trace(gc)(
+ "TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
+ "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
+ Thread::current()->name(),
+ requested_size * HeapWordSize / K,
+ min_size * HeapWordSize / K,
+ _max_tlab_size * HeapWordSize / K,
+ ergo_tlab * HeapWordSize / K,
+ size * HeapWordSize / K);
+ }
+
+ HeapWord* res = allocate_work(size);
+ if (res != NULL) {
+ *actual_size = size;
+
+ // Allocation successful: this is our new TLAB size, if we requested expansion
+ if (!fits) {
+ EpsilonThreadLocalData::set_ergo_tlab_size(Thread::current(), size);
+ }
+ } else {
+ // Allocation failed, reset ergonomics to try and fit smaller TLABs
+ EpsilonThreadLocalData::set_ergo_tlab_size(Thread::current(), 0);
+ }
+
+ return res;
}
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp Thu May 03 11:59:58 2018 +0200
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+
+class EpsilonThreadLocalData {
+private:
+ size_t _ergo_tlab_size;
+
+ EpsilonThreadLocalData() :
+ _ergo_tlab_size(0) {}
+
+ static EpsilonThreadLocalData* data(Thread* thread) {
+ assert(UseEpsilonGC, "Sanity");
+ return thread->gc_data<EpsilonThreadLocalData>();
+ }
+
+public:
+ static void create(Thread* thread) {
+ new (data(thread)) EpsilonThreadLocalData();
+ }
+
+ static void destroy(Thread* thread) {
+ data(thread)->~EpsilonThreadLocalData();
+ }
+
+ static size_t ergo_tlab_size(Thread *thread) {
+ return data(thread)->_ergo_tlab_size;
+ }
+
+ static void set_ergo_tlab_size(Thread *thread, size_t val) {
+ data(thread)->_ergo_tlab_size = val;
+ }
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
--- a/src/hotspot/share/gc/epsilon/epsilon_globals.hpp Thu May 03 10:02:32 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp Thu May 03 11:59:58 2018 +0200
@@ -62,6 +62,13 @@
"asks TLAB machinery to cap TLAB sizes at this value.") \
range(1, max_intx) \
\
+ experimental(double, EpsilonTLABElasticity, 1.1, \
+ "Multiplier to use when deciding on next TLAB size. Larger value "\
+ "improves performance at the expense of per-thread memory waste. " \
+ "Lower value improves memory footprint, especially for rarely " \
+ "allocating threads.") \
+ range(1.0, DBL_MAX) \
+ \
experimental(size_t, EpsilonMinHeapExpand, 128 * M, \
"Min expansion step for heap. Larger value improves performance " \
"at the potential expense of memory waste.") \