Micro loaders shall not retire chunks if the cost of retiring chunks outweighs the saved space stuefe-new-metaspace-branch
author	stuefe
Fri, 01 Nov 2019 14:18:40 +0100
branch	stuefe-new-metaspace-branch
changeset 58883 08102295011d
parent 58882 58b20be7bc04
child 58948 18659e040c64
Micro loaders shall not retire chunks if the cost of retiring chunks outweighs the saved space
src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp
src/hotspot/share/memory/metaspace/spaceManager.cpp
src/hotspot/share/memory/metaspace/spaceManager.hpp
test/hotspot/gtest/metaspace/test_spacemanager.cpp
--- a/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp	Fri Nov 01 13:02:09 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp	Fri Nov 01 14:18:40 2019 +0100
@@ -67,7 +67,8 @@
       ChunkAllocSequence::alloc_sequence_by_space_type(space_type, false),
       lock,
       RunningCounters::used_nonclass_counter(),
-      "non-class sm");
+      "non-class sm",
+      is_micro());
 
   // If needed, initialize class spacemanager
   if (Metaspace::using_class_space()) {
@@ -78,7 +79,8 @@
         ChunkAllocSequence::alloc_sequence_by_space_type(space_type, true),
         lock,
         RunningCounters::used_class_counter(),
-        "class sm");
+        "class sm",
+        is_micro());
   }
 
 #ifdef ASSERT
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp	Fri Nov 01 13:02:09 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp	Fri Nov 01 14:18:40 2019 +0100
@@ -103,7 +103,8 @@
 
   // If we have a current chunk, it should have been retired (almost empty) beforehand.
   // See: retire_current_chunk().
-  assert(current_chunk() == NULL || current_chunk()->free_below_committed_words() <= 10, "Must retire chunk beforehand");
+  assert(current_chunk() == NULL ||
+         (_is_micro_loader || current_chunk()->free_below_committed_words() <= 10), "Must retire chunk beforehand");
 
   const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size);
   chklvl_t pref_level = _chunk_alloc_sequence->get_next_chunk_level(_chunks.size());
@@ -159,14 +160,16 @@
              const ChunkAllocSequence* alloc_sequence,
              Mutex* lock,
              SizeAtomicCounter* total_used_words_counter,
-             const char* name)
+             const char* name,
+             bool is_micro_loader)
 : _lock(lock),
   _chunk_manager(chunk_manager),
   _chunk_alloc_sequence(alloc_sequence),
   _chunks(),
   _block_freelist(NULL),
   _total_used_words_counter(total_used_words_counter),
-  _name(name)
+  _name(name),
+  _is_micro_loader(is_micro_loader)
 {
 }
 
@@ -207,7 +210,18 @@
 
   size_t raw_remaining_words = c->free_below_committed_words();
   size_t net_remaining_words = get_net_allocation_word_size(raw_remaining_words);
-  if (net_remaining_words > 0) {
+
+  // Note: Micro class loaders (lambdas, reflection) typically make up the vast majority of loaders. They
+  //  will typically retire a chunk only once, if at all, and the remaining size is typically
+  //  very small.
+  // That means the structure needed to manage this leftover space will not see much action. However,
+  //  that structure is itself expensive (pointer lists), so we should only generate it if the benefit
+  //  of managing the free space outweighs the cost of that structure.
+  // Non-micro loaders may continue loading, deallocating and retiring more chunks, so the cost of that
+  //  structure may amortize over time. Micro loaders probably never will.
+  const size_t dont_bother_below_word_size = _is_micro_loader ? 64 : 0;
+
+  if (net_remaining_words > dont_bother_below_word_size) {
 
     log_debug(metaspace)(LOGFMT_SPCMGR " @" PTR_FORMAT " : retiring chunk " METACHUNK_FULL_FORMAT ".",
                          LOGFMT_SPCMGR_ARGS, p2i(this), METACHUNK_FULL_FORMAT_ARGS(c));
@@ -225,6 +239,7 @@
 
     DEBUG_ONLY(verify_locked();)
 
+    DEBUG_ONLY(InternalStats::inc_num_chunks_retired();)
   }
 
 }
@@ -334,8 +349,6 @@
     // but may still have free committed words.
     retire_current_chunk();
 
-    DEBUG_ONLY(InternalStats::inc_num_chunks_retired();)
-
     // Allocate a new chunk.
     if (allocate_new_current_chunk(raw_word_size) == false) {
       did_hit_limit = true;
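
For illustration only, here is a minimal standalone sketch of the retire decision the hunk above introduces: a micro loader skips retiring its current chunk when the leftover space would not justify the block-freelist bookkeeping. The 64-word threshold and the names net_remaining_words / is_micro_loader mirror the patch; the free function should_retire_leftover() and the main() driver are hypothetical and not part of the HotSpot sources.

// Illustration only (not HotSpot code): reproduces the threshold logic of the
// patched retire_current_chunk() with a hypothetical free function.
#include <cstddef>

static bool should_retire_leftover(size_t net_remaining_words, bool is_micro_loader) {
  // Micro loaders only retire if more than 64 words of leftover space would be
  // recovered; all other loaders retire any non-zero leftover, as before the patch.
  const size_t dont_bother_below_word_size = is_micro_loader ? 64 : 0;
  return net_remaining_words > dont_bother_below_word_size;
}

int main() {
  const bool micro_skips  = !should_retire_leftover(40, /*is_micro_loader=*/true);  // 40 <= 64: skip retiring
  const bool normal_keeps =  should_retire_leftover(40, /*is_micro_loader=*/false); // 40 > 0: retire as before
  return (micro_skips && normal_keeps) ? 0 : 1;
}
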
--- a/src/hotspot/share/memory/metaspace/spaceManager.hpp	Fri Nov 01 13:02:09 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/spaceManager.hpp	Fri Nov 01 14:18:40 2019 +0100
@@ -78,6 +78,9 @@
 
   const char* const _name;
 
+  // Whether or not this is a "micro loader", which is not expected to load more than one class.
+  const bool _is_micro_loader;
+
   Mutex* lock() const                           { return _lock; }
   ChunkManager* chunk_manager() const           { return _chunk_manager; }
   const ChunkAllocSequence* chunk_alloc_sequence() const    { return _chunk_alloc_sequence; }
@@ -115,7 +118,8 @@
                const ChunkAllocSequence* alloc_sequence,
                Mutex* lock,
                SizeAtomicCounter* total_used_words_counter,
-               const char* name);
+               const char* name,
+               bool is_micro_loader);
 
   ~SpaceManager();
 
--- a/test/hotspot/gtest/metaspace/test_spacemanager.cpp	Fri Nov 01 13:02:09 2019 +0100
+++ b/test/hotspot/gtest/metaspace/test_spacemanager.cpp	Fri Nov 01 14:18:40 2019 +0100
@@ -95,7 +95,7 @@
         // Pull lock during space creation, since this is what happens in the VM too
         // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
         MutexLocker ml(_lock,  Mutex::_no_safepoint_check_flag);
-        _sm = new SpaceManager(cm, alloc_sequence, _lock, &_used_counter, "gtest-SpaceManagerTestBed-sm");
+        _sm = new SpaceManager(cm, alloc_sequence, _lock, &_used_counter, "gtest-SpaceManagerTestBed-sm", false);
       }
     }