--- a/src/hotspot/share/memory/metaspace/chunkAllocSequence.cpp Thu Oct 17 16:39:45 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkAllocSequence.cpp Mon Oct 21 16:43:06 2019 +0200
@@ -52,7 +52,7 @@
// Caller shall repeat last allocation
return _entries[_num_entries - 1];
}
- return _entries[_num_entries];
+ return _entries[num_allocated];
}
};
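For context, a minimal standalone sketch of the indexing behavior after this change. The class and member names here (SequenceSketch, get_next_level, level_t) are placeholders for illustration only, not the HotSpot definitions; the point is that the n-th allocation reads entry [n], and once the table is exhausted the last entry is repeated, whereas the old code indexed one past the end.

    // Illustrative sketch only; names and types are placeholders.
    #include <assert.h>
    #include <stddef.h>

    typedef int level_t;

    class SequenceSketch {
      const level_t* _entries;
      const size_t _num_entries;
    public:
      SequenceSketch(const level_t* entries, size_t num_entries)
        : _entries(entries), _num_entries(num_entries) {}

      level_t get_next_level(size_t num_allocated) const {
        if (num_allocated >= _num_entries) {
          // Caller shall repeat the last allocation size.
          return _entries[_num_entries - 1];
        }
        // Before the fix this read _entries[_num_entries], one past the end.
        return _entries[num_allocated];
      }
    };

    int main() {
      const level_t seq[] = { 4, 4, 4, 4, 64 };
      SequenceSketch s(seq, sizeof(seq) / sizeof(level_t));
      assert(s.get_next_level(0) == 4);
      assert(s.get_next_level(4) == 64);
      assert(s.get_next_level(9) == 64);   // repeats the last entry
      return 0;
    }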
@@ -61,13 +61,13 @@
///////////////////////////
// chunk allocation sequences for normal loaders:
-static const chklvl_t g_sequ_standard_nonclass[] = {
+static const chklvl_t g_sequ_standard_non_class[] = {
chklvl::CHUNK_LEVEL_4K,
chklvl::CHUNK_LEVEL_4K,
chklvl::CHUNK_LEVEL_4K,
chklvl::CHUNK_LEVEL_4K,
chklvl::CHUNK_LEVEL_64K
- -1 // .. repeat last
+ // .. repeat last
};
static const chklvl_t g_sequ_standard_class[] = {
@@ -75,20 +75,20 @@
chklvl::CHUNK_LEVEL_2K,
chklvl::CHUNK_LEVEL_2K,
chklvl::CHUNK_LEVEL_2K,
- chklvl::CHUNK_LEVEL_32K,
- -1 // .. repeat last
+ chklvl::CHUNK_LEVEL_32K
+ // .. repeat last
};
///////////////////////////
// chunk allocation sequences for reflection/anonymous loaders:
// We allocate four smallish chunks before progressing to bigger chunks.
-static const chklvl_t g_sequ_anon_nonclass[] = {
+static const chklvl_t g_sequ_anon_non_class[] = {
chklvl::CHUNK_LEVEL_1K,
chklvl::CHUNK_LEVEL_1K,
chklvl::CHUNK_LEVEL_1K,
chklvl::CHUNK_LEVEL_1K,
- chklvl::CHUNK_LEVEL_4K,
- -1 // .. repeat last
+ chklvl::CHUNK_LEVEL_2K
+ // .. repeat last
};
static const chklvl_t g_sequ_anon_class[] = {
@@ -96,16 +96,16 @@
chklvl::CHUNK_LEVEL_1K,
chklvl::CHUNK_LEVEL_1K,
chklvl::CHUNK_LEVEL_1K,
- chklvl::CHUNK_LEVEL_4K,
- -1 // .. repeat last
+ chklvl::CHUNK_LEVEL_2K
+ // .. repeat last
};
#define DEFINE_CLASS_FOR_ARRAY(what) \
- static ConstantChunkAllocSequence g_chunk_alloc_sequence_##what (g_sequ_##what, sizeof(g_sequ_##what)/sizeof(int));
+ static ConstantChunkAllocSequence g_chunk_alloc_sequence_##what (g_sequ_##what, sizeof(g_sequ_##what)/sizeof(chklvl_t));
-DEFINE_CLASS_FOR_ARRAY(standard_nonclass)
+DEFINE_CLASS_FOR_ARRAY(standard_non_class)
DEFINE_CLASS_FOR_ARRAY(standard_class)
-DEFINE_CLASS_FOR_ARRAY(anon_nonclass)
+DEFINE_CLASS_FOR_ARRAY(anon_non_class)
DEFINE_CLASS_FOR_ARRAY(anon_class)
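A quick standalone illustration of why the macro now divides by sizeof(chklvl_t) rather than sizeof(int). The narrow typedef below is an assumption made only for this example; the general point is that the divisor should match the array's element type, otherwise the entry count is wrong whenever chklvl_t and int differ in size.

    // Illustrative sketch only; chklvl_t is assumed narrower than int here.
    #include <stdio.h>

    typedef signed char chklvl_t;   // assumption for this sketch

    static const chklvl_t g_sequ_example[] = { 7, 7, 7, 7, 3 };

    int main() {
      printf("wrong count: %zu\n", sizeof(g_sequ_example) / sizeof(int));       // 1 on typical targets
      printf("right count: %zu\n", sizeof(g_sequ_example) / sizeof(chklvl_t));  // 5
      return 0;
    }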
@@ -171,10 +171,10 @@
}
} else {
switch(space_type) {
- case StandardMetaspaceType: return &g_chunk_alloc_sequence_standard_class;
+ case StandardMetaspaceType: return &g_chunk_alloc_sequence_standard_non_class;
case ReflectionMetaspaceType:
- case UnsafeAnonymousMetaspaceType: return &g_chunk_alloc_sequence_anon_class;
- case BootMetaspaceType: return &g_chunk_alloc_sequence_boot_class;
+ case UnsafeAnonymousMetaspaceType: return &g_chunk_alloc_sequence_anon_non_class;
+ case BootMetaspaceType: return &g_chunk_alloc_sequence_boot_non_class;
default: ShouldNotReachHere();
}
}
--- a/src/hotspot/share/memory/metaspace/chunkManager.cpp Thu Oct 17 16:39:45 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkManager.cpp Mon Oct 21 16:43:06 2019 +0200
@@ -166,30 +166,12 @@
// 1) Attempt to find a free chunk of exactly the pref_level level
c = remove_first_chunk_at_level(pref_level);
- // Todo:
- //
- // We need to meditate about steps (2) and (3) a bit more.
- // By simply preferring to reuse small chunks vs splitting larger chunks we may emphasize
- // fragmentation, strangely enough, if callers wanting medium sized chunks take small chunks
- // instead, and taking them away from the many users which prefer small chunks.
- // Alternatives:
- // - alternating between (2) (3) and (3) (2)
- // - mixing (2) and (3) into one loop with a growing delta, and maybe a "hesitance" barrier function
- // - keeping track of chunk demand and adding this into the equation: if e.g. 4K chunks were heavily
- // preferred in the past, maybe for callers wanting larger chunks leave those alone.
- // - Take into account which chunks are committed? This would require some extra bookkeeping...
-
- // 2) Failing that, attempt to find a chunk smaller or equal the minimal level.
- if (c == NULL) {
- for (chklvl_t lvl = pref_level + 1; lvl <= max_level; lvl ++) {
- c = remove_first_chunk_at_level(lvl);
- if (c != NULL) {
- break;
- }
- }
+ // 2) Failing that, we are also willing to accept a chunk half that size, but nothing less for now...
+ if (c == NULL && pref_level < max_level) {
+ c = remove_first_chunk_at_level(pref_level + 1);
}
- // 3) Failing that, attempt to find a free chunk of larger size and split it.
+ // 3) Failing that, attempt to find a free chunk of larger size and split it to get the ideal size...
if (c == NULL) {
for (chklvl_t lvl = pref_level - 1; lvl >= chklvl::ROOT_CHUNK_LEVEL; lvl --) {
c = remove_first_chunk_at_level(lvl);
@@ -201,7 +183,18 @@
}
}
- // 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
+ // 4) Failing that, before we start allocating a new root chunk, let's really scrape the barrel. Any
+ // smaller chunk is acceptable provided it still fits the minimal size...
+ if (c == NULL) {
+ for (chklvl_t lvl = pref_level + 1; lvl <= max_level; lvl ++) {
+ c = remove_first_chunk_at_level(lvl);
+ if (c != NULL) {
+ break;
+ }
+ }
+ }
+
+ // 5) Failing that, attempt to allocate a new root chunk from the connected virtual space.
if (c == NULL) {
// Tracing
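To make the reordered fallback logic easier to follow, here is a condensed, compilable sketch of the new search order in get_chunk(). It is illustrative only and not the HotSpot code: the free lists are modeled as plain counters per level, the helper names and the level constants are placeholders, and "splitting" and "allocating a new root chunk" are stood in for by returning the target level. Recall that a larger chunk level means a smaller chunk.

    // Illustrative sketch only; names, constants and data structures are placeholders.
    #include <stdio.h>

    typedef int chklvl_t;
    static const chklvl_t ROOT_CHUNK_LEVEL = 0;
    static const chklvl_t HIGHEST_CHUNK_LEVEL = 13;

    // Dummy free lists: number of free chunks per level.
    static int g_free_chunks[HIGHEST_CHUNK_LEVEL + 1] = { 0 };

    // Returns the level a chunk was taken from, or -1 if that list is empty.
    static chklvl_t remove_first_chunk_at_level(chklvl_t lvl) {
      if (g_free_chunks[lvl] > 0) {
        g_free_chunks[lvl]--;
        return lvl;
      }
      return -1;
    }

    // Returns the level of the chunk handed out.
    chklvl_t get_chunk_sketch(chklvl_t max_level, chklvl_t pref_level) {
      // 1) Exactly the preferred size.
      chklvl_t c = remove_first_chunk_at_level(pref_level);
      // 2) One level smaller (half that size), but nothing less for now.
      if (c == -1 && pref_level < max_level) {
        c = remove_first_chunk_at_level(pref_level + 1);
      }
      // 3) A larger chunk, which would then be split down to the preferred size.
      if (c == -1) {
        for (chklvl_t lvl = pref_level - 1; lvl >= ROOT_CHUNK_LEVEL; lvl--) {
          c = remove_first_chunk_at_level(lvl);
          if (c != -1) {
            c = pref_level;  // stands in for splitting to the preferred size
            break;
          }
        }
      }
      // 4) Scrape the barrel: any smaller chunk that still fits the minimal size.
      if (c == -1) {
        for (chklvl_t lvl = pref_level + 1; lvl <= max_level; lvl++) {
          c = remove_first_chunk_at_level(lvl);
          if (c != -1) {
            break;
          }
        }
      }
      // 5) Last resort: stands in for allocating a new root chunk.
      if (c == -1) {
        c = ROOT_CHUNK_LEVEL;
      }
      return c;
    }

    int main() {
      g_free_chunks[7] = 1;                    // only one small chunk is free
      printf("%d\n", get_chunk_sketch(9, 5));  // falls through to step 4, prints 7
      return 0;
    }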
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp Thu Oct 17 16:39:45 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp Mon Oct 21 16:43:06 2019 +0200
@@ -108,6 +108,10 @@
pref_level = min_level;
}
+ log_trace(metaspace)("SpaceManager %s: requested word size " SIZE_FORMAT ", num chunks so far: %d, preferred level: "
+ CHKLVL_FORMAT ", min level: " CHKLVL_FORMAT ".",
+ _name, requested_word_size, _chunks.size(), pref_level, min_level);
+
Metachunk* c = _chunk_manager->get_chunk(min_level, pref_level);
if (c == NULL) {
log_debug(metaspace)("SpaceManager %s: failed to allocate new chunk for requested word size " SIZE_FORMAT ".",
--- a/test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp Thu Oct 17 16:39:45 2019 +0200
+++ b/test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp Mon Oct 21 16:43:06 2019 +0200
@@ -26,16 +26,20 @@
#include "memory/allocation.hpp"
+#include "memory/metaspace/chunkAllocSequence.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/counter.hpp"
#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace/commitMask.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/spaceManager.hpp"
+#include "memory/metaspace/settings.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"