add experimental option to cluster micro clds. stuefe-new-metaspace-branch
author stuefe
Wed, 25 Sep 2019 12:40:57 +0200
branch stuefe-new-metaspace-branch
changeset 58333 78b2e8f46dd4
parent 58228 6e61beb13680
child 58380 0d77dd9159b1
add experimental option to cluster micro clds.
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/metaspace/chunkManager.cpp
src/hotspot/share/memory/metaspace/chunkManager.hpp
src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp
src/hotspot/share/memory/metaspace/settings.cpp
src/hotspot/share/memory/metaspace/settings.hpp
src/hotspot/share/runtime/globals.hpp
--- a/src/hotspot/share/memory/metaspace.cpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Wed Sep 25 12:40:57 2019 +0200
@@ -716,6 +716,10 @@
   ChunkManager* cm = new ChunkManager("class space chunk manager", vsl);
   ChunkManager::set_chunkmanager_class(cm);
 
+  if (metaspace::Settings::separate_micro_cld_allocations()) {
+    ChunkManager* cm2 = new ChunkManager("microcld class space chunk manager", vsl);
+    ChunkManager::set_chunkmanager_microclds_class(cm2);
+  }
 }
 
 
@@ -737,17 +741,7 @@
 void Metaspace::ergo_initialize() {
 
   // Must happen before using any setting from Settings::---
-  metaspace::Settings::strategy_t strat = metaspace::Settings::strategy_balanced_reclaim;
-  if (strcmp(MetaspaceReclaimStrategy, "balanced") == 0) {
-    strat = metaspace::Settings::strategy_balanced_reclaim;
-  } else if (strcmp(MetaspaceReclaimStrategy, "aggressive") == 0) {
-    strat = metaspace::Settings::strategy_aggressive_reclaim;
-  } else if (strcmp(MetaspaceReclaimStrategy, "none") == 0) {
-    strat = metaspace::Settings::strategy_no_reclaim;
-  } else {
-    vm_exit_during_initialization("Invalid value for MetaspaceReclaimStrategy: \"%s\".", MetaspaceReclaimStrategy);
-  }
-  metaspace::Settings::initialize(strat);
+  metaspace::Settings::ergo_initialize();
 
   if (DumpSharedSpaces) {
     // Using large pages when dumping the shared archive is currently not implemented.
@@ -840,11 +834,16 @@
   }
 
   // Initialize non-class virtual space list, and its chunk manager:
-  VirtualSpaceList* vsl = new VirtualSpaceList("Non-Class VirtualSpaceList", CommitLimiter::globalLimiter());
+  VirtualSpaceList* vsl = new VirtualSpaceList("non-class virtualspacelist", CommitLimiter::globalLimiter());
   VirtualSpaceList::set_vslist_nonclass(vsl);
-  ChunkManager* cm = new ChunkManager("Non-Class ChunkManager", vsl);
+  ChunkManager* cm = new ChunkManager("non-class chunkmanager", vsl);
   ChunkManager::set_chunkmanager_nonclass(cm);
 
+  if (metaspace::Settings::separate_micro_cld_allocations()) {
+    ChunkManager* cm2 = new ChunkManager("microcld non-class chunk manager", vsl);
+    ChunkManager::set_chunkmanager_microclds_nonclass(cm2);
+  }
+
   _tracer = new MetaspaceTracer();
 
   _initialized = true;
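For illustration, a minimal standalone C++ sketch of the wiring added above, using simplified stand-in types rather than the real HotSpot classes: the second chunk manager per metadata type is created only when the option is enabled, and it shares its virtual space list with the regular one, so micro CLD allocations go into separate chunks backed by the same address space.

#include <cassert>
#include <cstdio>

// Simplified stand-ins; names only loosely mirror the patch, not the real classes.
struct VirtualSpaceList { const char* name; };
struct ChunkManager {
  const char* name;
  VirtualSpaceList* vslist;   // both managers draw from the same backing list
};

static ChunkManager* g_cm_nonclass = nullptr;
static ChunkManager* g_cm_microclds_nonclass = nullptr;

// Mirrors the initialization pattern above: the regular chunk manager is always
// created; the micro CLD one only when the option is on, sharing the same list.
static void init_nonclass_space(bool separate_micro_cld_allocations) {
  static VirtualSpaceList vsl = { "non-class virtualspacelist" };
  static ChunkManager cm = { "non-class chunkmanager", &vsl };
  g_cm_nonclass = &cm;
  if (separate_micro_cld_allocations) {
    static ChunkManager cm2 = { "microcld non-class chunk manager", &vsl };
    g_cm_microclds_nonclass = &cm2;
  }
}

int main() {
  init_nonclass_space(true);
  // Same backing virtual space list, separate free-chunk bookkeeping.
  assert(g_cm_nonclass->vslist == g_cm_microclds_nonclass->vslist);
  printf("regular: %s\nmicro-cld: %s\n",
         g_cm_nonclass->name, g_cm_microclds_nonclass->name);
  return 0;
}
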
--- a/src/hotspot/share/memory/metaspace/chunkManager.cpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkManager.cpp	Wed Sep 25 12:40:57 2019 +0200
@@ -425,6 +425,19 @@
   _chunkmanager_nonclass = cm;
 }
 
+ChunkManager* ChunkManager::_chunkmanager_microclds_class = NULL;
+ChunkManager* ChunkManager::_chunkmanager_microclds_nonclass = NULL;
+
+void ChunkManager::set_chunkmanager_microclds_class(ChunkManager* cm) {
+  assert(_chunkmanager_microclds_class == NULL, "Sanity");
+  _chunkmanager_microclds_class = cm;
+}
+
+void ChunkManager::set_chunkmanager_microclds_nonclass(ChunkManager* cm) {
+  assert(_chunkmanager_microclds_nonclass == NULL, "Sanity");
+  _chunkmanager_microclds_nonclass = cm;
+}
+
 
 // Update statistics.
 void ChunkManager::add_to_statistics(cm_stats_t* out) const {
--- a/src/hotspot/share/memory/metaspace/chunkManager.hpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkManager.hpp	Wed Sep 25 12:40:57 2019 +0200
@@ -156,6 +156,21 @@
   static void set_chunkmanager_class(ChunkManager* cm);
   static void set_chunkmanager_nonclass(ChunkManager* cm);
 
+  // Chunk managers used if micro CLD allocations are kept separate.
+private:
+
+  static ChunkManager* _chunkmanager_microclds_class;
+  static ChunkManager* _chunkmanager_microclds_nonclass;
+
+public:
+
+  static ChunkManager* chunkmanager_microclds_class() { return _chunkmanager_microclds_class; }
+  static ChunkManager* chunkmanager_microclds_nonclass() { return _chunkmanager_microclds_nonclass; }
+
+  static void set_chunkmanager_microclds_class(ChunkManager* cm);
+  static void set_chunkmanager_microclds_nonclass(ChunkManager* cm);
+
+
 };
 
 } // namespace metaspace
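Note that when the option is off, the two new pointers stay NULL; callers are expected to check Settings::separate_micro_cld_allocations() first, as ClassLoaderMetaspace does below, before reaching for chunkmanager_microclds_class() or chunkmanager_microclds_nonclass().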
--- a/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp	Wed Sep 25 12:40:57 2019 +0200
@@ -42,14 +42,17 @@
 namespace metaspace {
 
 static bool use_class_space(bool is_class) {
-  if (Metaspace::using_class_space()) {
-    if (is_class) {
-      return true;
-    }
+  if (Metaspace::using_class_space() && is_class) {
+    return true;
   }
   return false;
 }
 
+static bool is_micro_cld(MetaspaceType space_type) {
+  return space_type == metaspace::UnsafeAnonymousMetaspaceType ||
+         space_type == metaspace::ReflectionMetaspaceType;
+}
+
 static bool use_class_space(MetadataType mdType) {
   return use_class_space(is_class(mdType));
 }
@@ -60,9 +63,14 @@
   , _non_class_space_manager(NULL)
   , _class_space_manager(NULL)
 {
+  ChunkManager* const non_class_cm =
+      is_micro_cld(_space_type) && Settings::separate_micro_cld_allocations() ?
+          ChunkManager::chunkmanager_microclds_nonclass() :
+          ChunkManager::chunkmanager_nonclass();
+
   // Initialize non-class spacemanager
   _non_class_space_manager = new SpaceManager(
-      ChunkManager::chunkmanager_nonclass(),
+      non_class_cm,
       ChunkAllocSequence::alloc_sequence_by_space_type(space_type, false),
       lock,
       RunningCounters::used_nonclass_counter(),
@@ -70,8 +78,12 @@
 
   // If needed, initialize class spacemanager
   if (Metaspace::using_class_space()) {
+    ChunkManager* const class_cm =
+        is_micro_cld(_space_type) && Settings::separate_micro_cld_allocations() ?
+            ChunkManager::chunkmanager_microclds_class() :
+            ChunkManager::chunkmanager_class();
     _class_space_manager = new SpaceManager(
-        ChunkManager::chunkmanager_class(),
+        class_cm,
         ChunkAllocSequence::alloc_sequence_by_space_type(space_type, true),
         lock,
         RunningCounters::used_class_counter(),
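For illustration, a standalone sketch of the selection logic in the constructor above. The enum values and the is_micro_cld() test mirror the patch; everything else (the abbreviated MetaspaceType enum, select_chunk_manager(), the globals) is a simplified stand-in, not HotSpot code.

#include <cstdio>

// Abbreviated stand-in for HotSpot's MetaspaceType enum.
enum MetaspaceType {
  StandardMetaspaceType,
  UnsafeAnonymousMetaspaceType,
  ReflectionMetaspaceType
};

struct ChunkManager { const char* name; };

static ChunkManager g_regular  = { "non-class chunkmanager" };
static ChunkManager g_microcld = { "microcld non-class chunk manager" };

// Same test as in the patch: loaders which load only a single class.
static bool is_micro_cld(MetaspaceType space_type) {
  return space_type == UnsafeAnonymousMetaspaceType ||
         space_type == ReflectionMetaspaceType;
}

// Hypothetical helper mirroring the ternary in the constructor: micro CLDs get
// the dedicated chunk manager only if the option is enabled.
static ChunkManager* select_chunk_manager(MetaspaceType space_type,
                                          bool separate_micro_cld_allocations) {
  return (is_micro_cld(space_type) && separate_micro_cld_allocations) ?
      &g_microcld : &g_regular;
}

int main() {
  printf("reflection loader -> %s\n",
         select_chunk_manager(ReflectionMetaspaceType, true)->name);
  printf("standard loader   -> %s\n",
         select_chunk_manager(StandardMetaspaceType, true)->name);
  return 0;
}
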
--- a/src/hotspot/share/memory/metaspace/settings.cpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/settings.cpp	Wed Sep 25 12:40:57 2019 +0200
@@ -54,10 +54,11 @@
 size_t Settings::_uncommit_on_purge_min_word_size = 0;
 
 
-void Settings::initialize(strategy_t strat) {
+bool Settings::_separate_micro_cld_allocations = false;
 
-  switch (strat) {
-  case strategy_no_reclaim:
+void Settings::ergo_initialize() {
+
+  if (strcmp(MetaspaceReclaimStrategy, "none") == 0) {
 
     log_info(metaspace)("Initialized with strategy: no reclaim.");
 
@@ -75,9 +76,7 @@
     _uncommit_on_purge = false;
     _uncommit_on_purge_min_word_size = 3; // does not matter; should not be used resp. assert when used.
 
-    break;
-
-  case strategy_aggressive_reclaim:
+  } else if (strcmp(MetaspaceReclaimStrategy, "aggressive") == 0) {
 
     log_info(metaspace)("Initialized with strategy: aggressive reclaim.");
 
@@ -100,9 +99,7 @@
     _uncommit_on_purge = true;
     _uncommit_on_purge_min_word_size = _commit_granule_words; // does not matter; should not be used resp. assert when used.
 
-    break;
-
-  case strategy_balanced_reclaim:
+  } else if (strcmp(MetaspaceReclaimStrategy, "balanced") == 0) {
 
     log_info(metaspace)("Initialized with strategy: balanced reclaim.");
 
@@ -123,7 +120,9 @@
     _uncommit_on_purge = true;
     _uncommit_on_purge_min_word_size = _commit_granule_words;
 
-    break;
+  } else {
+
+    vm_exit_during_initialization("Invalid value for MetaspaceReclaimStrategy: \"%s\".", MetaspaceReclaimStrategy);
 
   }
 
@@ -132,6 +131,8 @@
   _enlarge_chunks_in_place = true;
   _enlarge_chunks_in_place_max_word_size = 256 * K;
 
+  // Optionally, shepherd micro CLD metaspace allocations into their own root chunks.
+  _separate_micro_cld_allocations = MetaspaceSeparateMicroCLDs;
 
   // Sanity checks.
   guarantee(commit_granule_words() <= chklvl::MAX_CHUNK_WORD_SIZE, "Too large granule size");
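The strategy selection itself is unchanged: the group of settings is still chosen via the MetaspaceReclaimStrategy ccstr flag, and an unknown value still aborts startup; only the parsing has moved from Metaspace::ergo_initialize() into Settings::ergo_initialize(). On a build of this branch, the aggressive settings group would presumably be selected with:

  java -XX:MetaspaceReclaimStrategy=aggressive -version
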
--- a/src/hotspot/share/memory/metaspace/settings.hpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/settings.hpp	Wed Sep 25 12:40:57 2019 +0200
@@ -84,6 +84,10 @@
   // Must be a multiple of and not smaller than commit granularity.
   static size_t _uncommit_on_purge_min_word_size;
 
+  // If true, allocations from micro CLDs (CLDs which load only a single class, e.g. hidden
+  // classes or reflection loaders) are shepherded into their own root chunks.
+  static bool _separate_micro_cld_allocations;
+
 public:
 
   static size_t commit_granule_bytes()                        { return _commit_granule_bytes; }
@@ -99,25 +103,9 @@
   static bool delete_nodes_on_purge()                         { return _delete_nodes_on_purge; }
   static bool uncommit_on_purge()                             { return _uncommit_on_purge; }
   static size_t uncommit_on_purge_min_word_size()             { return _uncommit_on_purge_min_word_size; }
-
-  // Describes a group of settings
-  enum strategy_t {
-
-    // Do not uncommit chunks. New chunks are completely committed thru from the start.
-    strategy_no_reclaim,
+  static bool separate_micro_cld_allocations()                { return _separate_micro_cld_allocations; }
 
-    // Uncommit very aggressively.
-    // - a rather small granule size of 16K
-    // - New chunks are committed for one granule size
-    // - returned chunks are uncommitted whenever possible
-    strategy_aggressive_reclaim,
-
-    // Uncommit, but try to strike a balance with CPU load
-    strategy_balanced_reclaim
-
-  };
-
-  static void initialize(strategy_t theme);
+  static void ergo_initialize();
 
   static void print_on(outputStream* st);
 
--- a/src/hotspot/share/runtime/globals.hpp	Thu Sep 19 16:28:40 2019 +0200
+++ b/src/hotspot/share/runtime/globals.hpp	Wed Sep 25 12:40:57 2019 +0200
@@ -1616,6 +1616,9 @@
   product(ccstr, MetaspaceReclaimStrategy, "balanced",                      \
           "options: balanced, aggressive, none")                            \
                                                                             \
+  product(bool, MetaspaceSeparateMicroCLDs, false,                          \
+          "Micro CLDs are separated.")                                      \
+                                                                            \
   manageable(uintx, MinHeapFreeRatio, 40,                                   \
           "The minimum percentage of heap free after GC to avoid expansion."\
           " For most GCs this applies to the old generation. In G1 and"     \