LeftOverBins as an optional replacement for the free block dictionary, which is rather ineffective
author stuefe
Fri, 01 Nov 2019 10:28:15 +0100
branch stuefe-new-metaspace-branch
changeset 59155 b537e6386306
parent 59138 714474295e0a
child 59238 6ce12ce00d3e
LeftOverBins as an optional replacement for the free block dictionary, which is rather ineffective
src/hotspot/share/memory/metaspace/internStat.hpp
src/hotspot/share/memory/metaspace/leftOverBins.cpp
src/hotspot/share/memory/metaspace/leftOverBins.hpp
src/hotspot/share/memory/metaspace/leftOverBins.inline.hpp
src/hotspot/share/memory/metaspace/settings.cpp
src/hotspot/share/memory/metaspace/settings.hpp
src/hotspot/share/memory/metaspace/spaceManager.cpp
src/hotspot/share/memory/metaspace/spaceManager.hpp
src/hotspot/share/runtime/globals.hpp
test/hotspot/gtest/metaspace/metaspaceTestsCommon.cpp
test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp
test/hotspot/gtest/metaspace/test_leftOverBins.cpp
test/hotspot/gtest/metaspace/test_spacemanager.cpp
--- a/src/hotspot/share/memory/metaspace/internStat.hpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/internStat.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -46,7 +46,7 @@
   /* Number of allocations. */                      \
   x_atomic(num_allocs)                              \
                                                     \
-  /* Number of deallocations */                     \
+  /* Number of deallocations (external) */          \
   x_atomic(num_deallocs)                            \
   /* Number of times an allocation was satisfied */ \
   /*  from deallocated blocks. */                   \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/leftOverBins.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/metaspace/leftOverBins.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+
+namespace metaspace {
+
+
+
+#ifdef ASSERT
+void LeftOverManager::verify() const {
+  _very_small_bins.verify();
+
+  if (_large_block_reserve != NULL) {
+    assert(_current != NULL, "Sanity");
+  }
+
+  assert( (_current == NULL && _current_size == 0) ||
+          (_current != NULL && _current_size > 0), "Sanity");
+
+  for (block_t* b = _large_block_reserve; b != NULL; b = b->next) {
+    assert(b->size > 0 && b->size <= 4 * M, "Weird block size");
+  }
+
+}
+#endif
+
+void LeftOverManager::large_block_statistics(block_stats_t* stats) const {
+  for (block_t* b = _large_block_reserve; b != NULL; b = b->next) {
+    stats->num_blocks ++;
+    stats->word_size += b->size;
+  }
+}
+
+void LeftOverManager::statistics(block_stats_t* stats) const {
+  stats->num_blocks = 0;
+  stats->word_size = 0;
+  _very_small_bins.statistics(stats);
+  if (_current != NULL) {
+    stats->num_blocks ++;
+    stats->word_size += _current_size;
+    large_block_statistics(stats);
+  } else {
+    assert(_large_block_reserve == NULL, "Sanity");
+  }
+}
+
+void LeftOverManager::print(outputStream* st, bool detailed) const {
+
+  block_stats_t s;
+
+  if (_current != NULL) {
+    st->print("current: " SIZE_FORMAT " words; ", _current_size);
+  }
+
+  s.num_blocks = 0; s.word_size = 0;
+  large_block_statistics(&s);
+  st->print("large blocks: %d blocks, " SIZE_FORMAT " words", s.num_blocks, s.word_size);
+  if (detailed) {
+    st->print(" (");
+    for (block_t* b = _large_block_reserve; b != NULL; b = b->next) {
+      st->print(SIZE_FORMAT "%s", b->size, b->next != NULL ? ", " : "");
+    }
+    st->print(")");
+  }
+  st->print("; ");
+
+  s.num_blocks = 0; s.word_size = 0;
+  _very_small_bins.statistics(&s);
+  st->print("small blocks: %d blocks, " SIZE_FORMAT " words", s.num_blocks, s.word_size);
+  if (detailed) {
+    st->print(" (");
+    _very_small_bins.print(st);
+    st->print(")");
+  }
+  st->print("; ");
+}
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/leftOverBins.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_LEFTOVERBINS_HPP
+#define SHARE_MEMORY_METASPACE_LEFTOVERBINS_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+
+class outputStream;
+
+namespace metaspace {
+
+// The LeftOverManager is responsible for managing small leftover and
+// deallocated blocks.
+// They come from two sources:
+// a) the leftover space in a chunk when the chunk gets retired
+//    because it cannot serve a requested allocation. These blocks
+//    can be largeish (hundreds to thousands of words).
+// b) metaspace allocations which are deallocated prematurely, e.g.
+//    due to interrupted class loading. These blocks are small or
+//    very small.
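+//
+// Usage sketch (illustrative only; the calls are the public API declared
+// further down in this file):
+//
+//   LeftOverManager lom;
+//   lom.add_block(p, leftover_word_size);  // hand over a leftover or deallocated block
+//   MetaWord* q = lom.get_block(10);       // retrieve a block of at least 10 words, or NULL
+//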
+
+class BinMap {
+
+  typedef uint32_t mask_type;
+  mask_type _mask;
+
+  static mask_type mask_for_pos(int pos) { return (mask_type)1 << pos; }
+
+public:
+
+  BinMap() : _mask(0) {}
+
+  bool all_zero() const          { return _mask == 0; }
+
+  bool get_bit(int pos) const    { return (_mask & mask_for_pos(pos)) != 0 ? true : false; }
+  void set_bit(int pos)          { _mask |= mask_for_pos(pos); }
+  void clr_bit(int pos)          { _mask &= ~mask_for_pos(pos); }
+
+  // Starting at (including) pos, find the position of the next 1 bit.
+  // Return -1 if not found.
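+  //  (For example, if only bits 3 and 7 are set: find_next_set_bit(0) == 3,
+  //   find_next_set_bit(4) == 7 and find_next_set_bit(8) == -1.)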
+  inline int find_next_set_bit(int pos) const;
+
+  static int size() { return sizeof(mask_type) * 8; }
+
+};
+
+struct block_t {
+  block_t* next;
+  size_t size;
+};
+
+struct block_stats_t {
+  size_t word_size;
+  int num_blocks;
+};
+
+template <
+  size_t min_word_size,
+  size_t spread,
+  int num_bins
+>
+class Bins {
+
+  STATIC_ASSERT(sizeof(block_t) <= (min_word_size * BytesPerWord));
+
+  block_t* _bins[num_bins];
+
+  BinMap _mask;
+
+  // e.g. spread = 4
+  //
+  // sz    bno (put)  bno (get)
+  //         (guarant)
+  // 0     00         00
+  // 1     00         01
+  // 2     00         01
+  // 3     00         01
+  // 4     01         01
+  // 5     01         02
+  // 6     01         02
+  // 7     01         02
+  // 8     02         02
+  // 9     02         03
+  // 10    02         03
+  // 11    02         03
+  //
+  // put -> no = wordsize / spread
+  //
+  // get -> no = (req_wordsize + spread - 1) / spread
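+  //
+  // Worked example for the Bins<2, 2, 16> instantiation used by the
+  //  LeftOverManager below (min_word_size = 2, spread = 2): bin i holds
+  //  blocks of [2 + 2*i, 4 + 2*i) words. put(p, 5) files the block under
+  //  bin (5 - 2) / 2 = 1, which may also hold 4-word blocks; since only
+  //  the minimum size of a bin is guaranteed, get(5) rounds the request
+  //  up to 5 + (2 - 1) = 6 and starts searching at bin (6 - 2) / 2 = 2,
+  //  whose blocks are all at least 6 >= 5 words large.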
+
+  // The bin number for a given word size.
+  static int bin_for_size(size_t word_size) {
+    assert(word_size >= min_word_size && word_size < maximal_word_size(),
+           "Word size oob (" SIZE_FORMAT ")", word_size);
+    return (word_size - min_word_size) / spread;
+  }
+
+  // [minimal, maximal) size of blocks which are held in a bin.
+  // Note that when taking a block out of the bin, only the minimum block size
+  // is guaranteed.
+  static size_t minimal_word_size_in_bin(int bno) {
+    return min_word_size + (bno * spread);
+  }
+  static size_t maximal_word_size_in_bin(int bno) {
+    return minimal_word_size_in_bin(bno) + spread;
+  }
+
+public:
+
+  Bins() : _mask() {
+    assert(BinMap::size() >= num_bins, "mask too small");
+    ::memset(_bins, 0, sizeof(_bins));
+  }
+
+  // [min, max) word size
+  static size_t minimal_word_size() { return min_word_size; }
+  static size_t maximal_word_size() { return min_word_size + (spread * num_bins); }
+
+  inline void put(MetaWord* p, size_t word_size);
+
+  inline block_t* get(size_t word_size);
+
+#ifdef ASSERT
+  void verify() const;
+#endif
+
+  void statistics(block_stats_t* stats) const;
+
+  void print(outputStream* st) const;
+
+};
+
+
+class LeftOverManager : public CHeapObj<mtInternal> {
+
+  typedef Bins<2, 2, 16> VerySmallBinsType;
+  VerySmallBinsType _very_small_bins;
+
+  block_t* _large_block_reserve;
+
+  // The current large block we gnaw on
+  MetaWord* _current;
+  size_t _current_size;
+
+  SizeCounter _total_word_size;
+
+  // Take the topmost block from the large block reserve list
+  // and make it current.
+  inline void prime_current();
+
+  // Allocate from current block. Returns NULL if current block
+  // is too small.
+  inline MetaWord* alloc_from_current(size_t word_size);
+
+  void large_block_statistics(block_stats_t* stats) const;
+
+public:
+
+  static size_t minimal_word_size() {
+    return VerySmallBinsType::minimal_word_size();
+  }
+
+  LeftOverManager() :
+    _very_small_bins(),
+    _large_block_reserve(NULL),
+    _current(NULL),
+    _current_size(0)
+  {}
+
+  inline void add_block(MetaWord* p, size_t word_size);
+
+  inline MetaWord* get_block(size_t requested_word_size);
+
+#ifdef ASSERT
+  void verify() const;
+#endif
+
+  void statistics(block_stats_t* stats) const;
+
+  void print(outputStream* st, bool detailed = false) const;
+
+  size_t total_word_size() const { return _total_word_size.get(); }
+
+};
+
+
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_LEFTOVERBINS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/leftOverBins.inline.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_LEFTOVERBINS_INLINE_HPP
+#define SHARE_MEMORY_METASPACE_LEFTOVERBINS_INLINE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/leftOverBins.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+namespace metaspace {
+
+
+// Starting at (including) pos, find the position of the next 1 bit.
+// Return -1 if not found.
+int BinMap::find_next_set_bit(int pos) const {
+  if (get_bit(pos)) {
+    return pos;
+  }
+  mask_type m2 = _mask;
+  int pos2 = pos + 1;
+  m2 >>= pos2;
+  if (m2 > 0) {
+    while ((m2 & (mask_type)1) == 0) {
+      m2 >>= 1;
+      pos2 ++;
+    }
+    return pos2;
+  }
+  return -1;
+}
+
+///////////////////////////////////////
+
+template <size_t min_word_size, size_t spread, int num_bins>
+void Bins<min_word_size, spread, num_bins>::put(MetaWord* p, size_t word_size) {
+  assert(word_size >= minimal_word_size() && word_size < maximal_word_size(), "Invalid word size");
+  block_t* b = (block_t*)p;
+  int bno = bin_for_size(word_size);
+  assert(bno >= 0 && bno < num_bins, "Sanity");
+  assert(b != _bins[bno], "double add?");
+  b->next = _bins[bno];
+  b->size = word_size;
+  _bins[bno] = b;
+  _mask.set_bit(bno);
+}
+
+template <size_t min_word_size, size_t spread, int num_bins>
+block_t* Bins<min_word_size, spread, num_bins>::get(size_t word_size) {
+  // Adjust size for spread (we need the bin number which guarantees word_size).
+  word_size += (spread - 1);
+  if (word_size >= maximal_word_size()) {
+    return NULL;
+  }
+  int bno = bin_for_size(word_size);
+  bno = _mask.find_next_set_bit(bno);
+  if (bno != -1) {
+    assert(bno >= 0 && bno < num_bins, "Sanity");
+    assert(_bins[bno] != NULL, "Sanity");
+    block_t* b = _bins[bno];
+    _bins[bno] = b->next;
+    if (_bins[bno] == NULL) {
+      _mask.clr_bit(bno);
+    }
+    return b;
+  }
+  return NULL;
+}
+
+#ifdef ASSERT
+template <size_t min_word_size, size_t spread, int num_bins>
+void Bins<min_word_size, spread, num_bins>::verify() const {
+  for (int i = 0; i < num_bins; i ++) {
+    assert(_mask.get_bit(i) == (_bins[i] != NULL), "Sanity");
+    const size_t min_size = minimal_word_size_in_bin(i);
+    const size_t max_size = maximal_word_size_in_bin(i);
+    for(block_t* b = _bins[i]; b != NULL; b = b->next) {
+      assert(b->size >= min_size && b->size < max_size, "Sanity");
+    }
+  }
+}
+#endif // ASSERT
+
+
+template <size_t min_word_size, size_t spread, int num_bins>
+void Bins<min_word_size, spread, num_bins>::statistics(block_stats_t* stats) const {
+  for (int i = 0; i < num_bins; i ++) {
+    for(block_t* b = _bins[i]; b != NULL; b = b->next) {
+      stats->num_blocks ++;
+      stats->word_size += b->size;
+    }
+  }
+}
+
+template <size_t min_word_size, size_t spread, int num_bins>
+void Bins<min_word_size, spread, num_bins>::print(outputStream* st) const {
+  bool first = true;
+  for (int i = 0; i < num_bins; i ++) {
+    int n = 0;
+    for(block_t* b = _bins[i]; b != NULL; b = b->next) {
+      n ++;
+    }
+    if (n > 0) {
+      if (!first) {
+        st->print(", ");
+      } else {
+        first = false;
+      }
+      st->print(SIZE_FORMAT "=%d", minimal_word_size_in_bin(i), n);
+    }
+  }
+}
+
+
+
+///////////////////////////////////////
+
+// Take the topmost block from the large block reserve list
+// and make it current.
+inline void LeftOverManager::prime_current() {
+  if (_large_block_reserve != NULL) {
+    _current = (MetaWord*) _large_block_reserve;
+    _current_size = _large_block_reserve->size;
+    _large_block_reserve = _large_block_reserve->next;
+  } else {
+    _current = NULL;
+    _current_size = 0;
+  }
+}
+
+// Allocate from current block. Returns NULL if current block
+// is too small.
+inline MetaWord* LeftOverManager::alloc_from_current(size_t word_size) {
+  if (_current_size >= word_size) {
+    assert(_current != NULL, "Must be");
+    MetaWord* p = _current;
+    size_t remaining = _current_size - word_size;
+    if (remaining >= _very_small_bins.minimal_word_size()) {
+      _current = p + word_size;
+      _current_size = remaining;
+    } else {
+      // completely used up old large block. Proceed to next.
+      prime_current();
+    }
+    return p;
+  }
+  return NULL;
+}
+
+inline void LeftOverManager::add_block(MetaWord* p, size_t word_size) {
+  if (word_size >= minimal_word_size()) {
+    if (word_size < _very_small_bins.maximal_word_size()) {
+      _very_small_bins.put(p, word_size);
+    } else {
+      if (_current == NULL) {
+        assert(_large_block_reserve == NULL, "Should be primed.");
+        _current = p;
+        _current_size = word_size;
+      } else {
+        assert(sizeof(block_t) <= word_size * BytesPerWord, "must be");
+        block_t* b = (block_t*)p;
+        b->size = word_size;
+        b->next = _large_block_reserve;
+        _large_block_reserve = b;
+      }
+    }
+    _total_word_size.increment_by(word_size);
+  }
+
+  DEBUG_ONLY(verify();)
+
+}
+
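+// Returns a block of at least requested_word_size words, or NULL on failure.
+// Sketch of the search order implemented below:
+//  1) carve the allocation off the current large block (cheap pointer bump);
+//  2) if the current block has shrunk below the small-bin threshold, move it
+//     into the small bins, prime the next large block and retry once;
+//  3) failing that, fall back to the small bins, returning any usable
+//     remainder of the found block back to the bins.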
+inline MetaWord* LeftOverManager::get_block(size_t requested_word_size) {
+
+  requested_word_size = MAX2(requested_word_size, minimal_word_size());
+
+  // First attempt to take from current large block because that is cheap (pointer bump)
+  // and efficient (no spread)
+  MetaWord* p = alloc_from_current(requested_word_size);
+  if (p == NULL && _current_size > 0) {
+    // current large block is too small. If it is moth-eaten enough to be put
+    // into the small remains bin, do so.
+    if (_current_size < _very_small_bins.maximal_word_size()) {
+      _very_small_bins.put(_current, _current_size);
+      prime_current(); // proceed to next large block.
+      // --- and re-attempt - but only once more. If that fails too, we give up.
+      p = alloc_from_current(requested_word_size);
+    }
+  }
+
+  if (p == NULL) {
+    // Did not work. Check the small bins.
+    if (requested_word_size < _very_small_bins.maximal_word_size()) {
+      block_t* b = _very_small_bins.get(requested_word_size);
+      if (b != NULL) {
+        p = (MetaWord*)b;
+        size_t remaining = b->size - requested_word_size;
+        if (remaining >= _very_small_bins.minimal_word_size()) {
+          MetaWord* q = p + requested_word_size;
+          _very_small_bins.put(q, remaining);
+        }
+      }
+    }
+  }
+
+  if (p != NULL) {
+    _total_word_size.decrement_by(requested_word_size);
+    DEBUG_ONLY(verify();)
+  }
+
+  return p;
+
+}
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_LEFTOVERBINS_INLINE_HPP
--- a/src/hotspot/share/memory/metaspace/settings.cpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/settings.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -53,6 +53,8 @@
 bool Settings::_uncommit_on_purge = false;
 size_t Settings::_uncommit_on_purge_min_word_size = 0;
 
+bool Settings::_use_lom = false;
+
 
 
 void Settings::ergo_initialize() {
@@ -130,6 +132,8 @@
   _enlarge_chunks_in_place = MetaspaceEnlargeChunksInPlace;
   _enlarge_chunks_in_place_max_word_size = 256 * K;
 
+  _use_lom = MetaspaceUseLOM;
+
   // Sanity checks.
   guarantee(commit_granule_words() <= chklvl::MAX_CHUNK_WORD_SIZE, "Too large granule size");
   guarantee(is_power_of_2(commit_granule_words()), "granule size must be a power of 2");
@@ -161,6 +165,7 @@
   st->print_cr(" - uncommit_on_purge: %d.", (int)uncommit_on_purge());
   st->print_cr(" - uncommit_on_purge_min_word_size: " SIZE_FORMAT ".", uncommit_on_purge_min_word_size());
 
+  st->print_cr(" - use_lom: %d.", use_lom());
 
 }
 
--- a/src/hotspot/share/memory/metaspace/settings.hpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/settings.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -88,6 +88,9 @@
   // whose lower 32bits are zero.
   static const bool _do_not_return_32bit_aligned_addresses = true;
 
+  // Use the LeftOverManager (LOM) instead of the block freelist to manage deallocated and leftover blocks.
+  static bool _use_lom;
+
 public:
 
   static size_t commit_granule_bytes()                        { return _commit_granule_bytes; }
@@ -105,6 +108,8 @@
   static size_t uncommit_on_purge_min_word_size()             { return _uncommit_on_purge_min_word_size; }
   static bool do_not_return_32bit_aligned_addresses()         { return _do_not_return_32bit_aligned_addresses; }
 
+  static bool use_lom()                                       { return _use_lom; }
+
   static void ergo_initialize();
 
   static void print_on(outputStream* st);
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -26,8 +26,10 @@
 
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "memory/metaspace/blockFreelist.hpp"
 #include "memory/metaspace/chunkManager.hpp"
 #include "memory/metaspace/internStat.hpp"
+#include "memory/metaspace/leftOverBins.inline.hpp"
 #include "memory/metaspace/metachunk.hpp"
 #include "memory/metaspace/metaDebug.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
@@ -157,6 +159,19 @@
   _block_freelist->return_block(p, word_size);
 }
 
+
+void SpaceManager::create_lom() {
+  assert(_lom == NULL, "Only call once");
+  _lom = new LeftOverManager();
+}
+
+void SpaceManager::add_allocation_to_lom(MetaWord* p, size_t word_size) {
+  if (_lom == NULL) {
+    _lom = new LeftOverManager(); // Create only on demand
+  }
+  _lom->add_block(p, word_size);
+}
+
 SpaceManager::SpaceManager(ChunkManager* chunk_manager,
              const ChunkAllocSequence* alloc_sequence,
              Mutex* lock,
@@ -167,7 +182,7 @@
   _chunk_manager(chunk_manager),
   _chunk_alloc_sequence(alloc_sequence),
   _chunks(),
-  _block_freelist(NULL),
+  _block_freelist(NULL), _lom(NULL),
   _total_used_words_counter(total_used_words_counter),
   _name(name),
   _is_micro_loader(is_micro_loader)
@@ -191,6 +206,7 @@
   DEBUG_ONLY(chunk_manager()->verify(true);)
 
   delete _block_freelist;
+  delete _lom;
 
 }
 
@@ -230,7 +246,13 @@
     bool did_hit_limit = false;
     MetaWord* ptr = c->allocate(net_remaining_words, &did_hit_limit);
     assert(ptr != NULL && did_hit_limit == false, "Should have worked");
-    add_allocation_to_block_freelist(ptr, net_remaining_words);
+
+    if (Settings::use_lom()) {
+      add_allocation_to_lom(ptr, net_remaining_words);
+    } else {
+      add_allocation_to_block_freelist(ptr, net_remaining_words);
+    }
+
     _total_used_words_counter->increment_by(net_remaining_words);
 
     // After this operation: the current chunk should have (almost) no free committed space left.
@@ -281,6 +303,19 @@
   // from the dictionary until it starts to get fat.  Is this
   // a reasonable policy?  Maybe an skinny dictionary is fast enough
   // for allocations.  Do some profiling.  JJJ
+  if (Settings::use_lom()) {
+    if (_lom != NULL) {
+      p = _lom->get_block(raw_word_size);
+      if (p != NULL) {
+        DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();)
+        log_trace(metaspace)(LOGFMT_SPCMGR ": .. taken from the LOM.", LOGFMT_SPCMGR_ARGS);
+        // Note: space handed to the LOM counts as used (see retire_current_chunk()) -
+        // that means that we must not increase the used counter again when allocating from the LOM.
+        // Therefore we return here.
+        return p;
+      }
+    }
+  } else {
   if (_block_freelist != NULL && _block_freelist->total_size() > Settings::allocation_from_dictionary_limit()) {
     p = _block_freelist->get_block(raw_word_size);
 
@@ -294,6 +329,7 @@
     }
 
   }
+  }
 
   // 2) Failing that, attempt to allocate from the current chunk. If we hit commit limit, return NULL.
   if (p == NULL && !did_hit_limit) {
@@ -405,7 +441,11 @@
     return;
   }
 
-  add_allocation_to_block_freelist(p, raw_word_size);
+  if (Settings::use_lom()) {
+    add_allocation_to_lom(p, raw_word_size);
+  } else {
+    add_allocation_to_block_freelist(p, raw_word_size);
+  }
 
   DEBUG_ONLY(verify_locked();)
 
@@ -437,9 +477,18 @@
     }
   }
 
-  if (block_freelist() != NULL) {
-    out->free_blocks_num += block_freelist()->num_blocks();
-    out->free_blocks_word_size += block_freelist()->total_size();
+  if (Settings::use_lom()) {
+    if (lom() != NULL) {
+      block_stats_t s;
+      lom()->statistics(&s);
+      out->free_blocks_num += s.num_blocks;
+      out->free_blocks_word_size += s.word_size;
+    }
+  } else {
+    if (block_freelist() != NULL) {
+      out->free_blocks_num += block_freelist()->num_blocks();
+      out->free_blocks_word_size += block_freelist()->total_size();
+    }
   }
 
   SOMETIMES(out->verify();)
@@ -456,6 +505,12 @@
 
   _chunks.verify();
 
+  if (Settings::use_lom()) {
+    if (lom() != NULL) {
+      lom()->verify();
+    }
+  }
+
 }
 
 void SpaceManager::verify() const {
--- a/src/hotspot/share/memory/metaspace/spaceManager.hpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/spaceManager.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -40,6 +40,9 @@
 
 namespace metaspace {
 
+class BlockFreelist;
+class LeftOverManager;
+
 struct sm_stats_t;
 
 // The SpaceManager:
@@ -71,6 +74,7 @@
 
   // Prematurely released metablocks.
   BlockFreelist* _block_freelist;
+  LeftOverManager* _lom;
 
   // Points to outside size counter which we are to increase/decrease when we allocate memory
   // on behalf of a user or when we are destroyed.
@@ -89,6 +93,10 @@
   void create_block_freelist();
   void add_allocation_to_block_freelist(MetaWord* p, size_t word_size);
 
+  LeftOverManager* lom() const                  { return _lom; }
+  void create_lom();
+  void add_allocation_to_lom(MetaWord* p, size_t word_size);
+
   // The remaining committed free space in the current chunk is chopped up and stored in the block
   // free list for later use. As a result, the current chunk will remain current but completely
   // used up. This is a preparation for calling allocate_new_current_chunk().
--- a/src/hotspot/share/runtime/globals.hpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/src/hotspot/share/runtime/globals.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -1619,6 +1619,9 @@
   product(bool, MetaspaceEnlargeChunksInPlace, true,                        \
           "Metapace chunks are enlarged in place.")                         \
                                                                             \
+  product(bool, MetaspaceUseLOM, true,                                      \
+          "Use the LeftOverManager for deallocated and leftover blocks.")   \
+                                                                            \
   manageable(uintx, MinHeapFreeRatio, 40,                                   \
           "The minimum percentage of heap free after GC to avoid expansion."\
           " For most GCs this applies to the old generation. In G1 and"     \
--- a/test/hotspot/gtest/metaspace/metaspaceTestsCommon.cpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/test/hotspot/gtest/metaspace/metaspaceTestsCommon.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -159,3 +159,15 @@
   return check_marked_address(p, pattern) && check_marked_address(p + word_size - 1, pattern);
 }
 
+void mark_range(MetaWord* p, size_t word_size) {
+  assert(word_size > 0 && p != NULL, "sanity");
+  uintx pattern = (uintx)p2i(p);
+  mark_range(p, pattern, word_size);
+}
+
+bool check_marked_range(const MetaWord* p, size_t word_size) {
+  uintx pattern = (uintx)p2i(p);
+  return check_marked_range(p, pattern, word_size);
+}
+
+
--- a/test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp	Fri Nov 01 10:28:15 2019 +0100
@@ -33,6 +33,7 @@
 #include "memory/metaspace/counter.hpp"
 #include "memory/metaspace/commitLimiter.hpp"
 #include "memory/metaspace/commitMask.hpp"
+#include "memory/metaspace/leftOverBins.inline.hpp"
 #include "memory/metaspace/metachunk.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/metaspaceEnums.hpp"
@@ -63,6 +64,7 @@
 using metaspace::SizeCounter;
 using metaspace::SizeAtomicCounter;
 using metaspace::IntCounter;
+using metaspace::LeftOverManager;
 using metaspace::Metachunk;
 using metaspace::MetachunkList;
 using metaspace::MetachunkListCluster;
@@ -197,6 +199,9 @@
 void mark_range(MetaWord* p, uintx pattern, size_t word_size);
 bool check_marked_range(const MetaWord* p, uintx pattern, size_t word_size);
 
+void mark_range(MetaWord* p, size_t word_size);
+bool check_marked_range(const MetaWord* p, size_t word_size);
+
 //////////////////////////////////////////////////////////
 // Some helpers to avoid typing out those annoying casts for NULL
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_leftOverBins.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+
+//#define LOG_PLEASE
+
+#include "metaspaceTestsCommon.hpp"
+
+class LeftOverBinsTest {
+
+  // A simple preallocated buffer used to "feed" the allocator.
+  // Mimics chunk retirement leftover blocks.
+  class FeederBuffer {
+
+    static const size_t buf_word_size = 512 * K;
+    MetaWord* _buf;
+    size_t _used;
+
+  public:
+
+    FeederBuffer() : _used(0) {
+      _buf = NEW_C_HEAP_ARRAY(MetaWord, buf_word_size, mtInternal);
+    }
+
+    ~FeederBuffer() {
+      FREE_C_HEAP_ARRAY(MetaWord, _buf);
+    }
+
+    MetaWord* get(size_t word_size) {
+      if (_used > (buf_word_size - word_size)) {
+        return NULL;
+      }
+      MetaWord* p = _buf + _used;
+      _used += word_size;
+      return p;
+    }
+
+  };
+
+  FeederBuffer _fb;
+  LeftOverManager _lom;
+
+  // random generator for block feeding
+  RandSizeGenerator _rgen_feeding;
+
+  // random generator for allocations (and, hence, deallocations)
+  RandSizeGenerator _rgen_allocations;
+
+  SizeCounter _allocated_words;
+
+  struct allocation_t {
+    allocation_t* next;
+    size_t word_size;
+    MetaWord* p;
+  };
+
+  // Linked list of the currently live allocations (newest first).
+  allocation_t* _allocations;
+
+
+  int _num_allocs;
+  int _num_deallocs;
+  int _num_feeds;
+
+  bool feed_some() {
+    size_t word_size = _rgen_feeding.get();
+    MetaWord* p = _fb.get(word_size);
+    if (p != NULL) {
+      _lom.add_block(p, word_size);
+      return true;
+    }
+    return false;
+  }
+
+  void deallocate_top() {
+
+    allocation_t* a = _allocations;
+    if (a != NULL) {
+      _allocations = a->next;
+      check_marked_range(a->p, a->word_size);
+      _lom.add_block(a->p, a->word_size);
+      delete a;
+      DEBUG_ONLY(_lom.verify();)
+    }
+  }
+
+  bool allocate() {
+
+    size_t word_size = MAX2(_rgen_allocations.get(), _lom.minimal_word_size());
+    MetaWord* p = _lom.get_block(word_size);
+    if (p != NULL) {
+      _allocated_words.increment_by(word_size);
+      allocation_t* a = new allocation_t;
+      a->p = p; a->word_size = word_size;
+      a->next = _allocations;
+      _allocations = a;
+      DEBUG_ONLY(_lom.verify();)
+      mark_range(p, word_size);
+      return true;
+    }
+    return false;
+  }
+
+  void test_all_marked_ranges() {
+    for (allocation_t* a = _allocations; a != NULL; a = a->next) {
+      check_marked_range(a->p, a->word_size);
+    }
+  }
+
+  void test_loop() {
+    // We loop and in each iteration execute one of three operations:
+    // - allocation from the LOM
+    // - deallocation to the LOM of a previously allocated block
+    // - feeding a new larger block into the LOM (mimics chunk retirement)
+    // When we have fed all large blocks into the LOM (feed buffer empty), we
+    //  switch to draining the LOM completely (allocations only).
+    bool forcefeed = false;
+    bool draining = false;
+    bool stop = false;
+    int iter = 100000; // safety stop
+    while (!stop && iter > 0) {
+      iter --;
+      int surprise = (int)os::random() % 10;
+      if (!draining && (surprise >= 7 || forcefeed)) {
+        forcefeed = false;
+        if (feed_some()) {
+          _num_feeds ++;
+        } else {
+          // We fed all input memory into the LOM. Now let's proceed until the LOM is drained.
+          draining = true;
+        }
+      } else if (!draining && surprise < 1) {
+        deallocate_top();
+        _num_deallocs ++;
+      } else {
+        if (allocate()) {
+          _num_allocs ++;
+        } else {
+          if (draining) {
+            stop = _lom.total_word_size() < 512;
+          } else {
+            forcefeed = true;
+          }
+        }
+      }
+      if ((iter % 1000) == 0) {
+        DEBUG_ONLY(_lom.verify();)
+        test_all_marked_ranges();
+        LOG("a %d (" SIZE_FORMAT "), d %d, f %d", _num_allocs, _allocated_words.get(), _num_deallocs, _num_feeds);
+#ifdef LOG_PLEASE
+        _lom.print(tty, true);
+        tty->cr();
+#endif
+      }
+    }
+
+    // Drain
+
+
+  }
+
+
+
+public:
+
+  LeftOverBinsTest(size_t avg_alloc_size) :
+    _fb(), _lom(),
+    _rgen_feeding(128, 4096),
+    _rgen_allocations(avg_alloc_size / 4, avg_alloc_size * 2, 0.01f, avg_alloc_size / 3, avg_alloc_size * 30),
+    _allocations(NULL),
+    _num_allocs(0), _num_deallocs(0), _num_feeds(0)
+  {
+    // some initial feeding
+    _lom.add_block(_fb.get(1024), 1024);
+  }
+
+
+  static void test_small_allocations() {
+    LeftOverBinsTest test(10);
+    test.test_loop();
+  }
+
+  static void test_medium_allocations() {
+    LeftOverBinsTest test(30);
+    test.test_loop();
+  }
+
+  static void test_large_allocations() {
+    LeftOverBinsTest test(150);
+    test.test_loop();
+  }
+
+
+};
+
+TEST_VM(metaspace, leftoverbins_mask_basic) {
+  // Basic tests
+  metaspace::BinMap map;
+  EXPECT_TRUE(map.all_zero());
+  for (int i = 0; i < map.size(); i ++) {
+    map.set_bit(i);
+    EXPECT_TRUE(map.get_bit(i));
+    map.clr_bit(i);
+    EXPECT_FALSE(map.get_bit(i));
+    EXPECT_TRUE(map.all_zero());
+  }
+}
+
+TEST_VM(metaspace, leftoverbins_mask_find_next_set_bit) {
+  metaspace::BinMap map;
+  EXPECT_TRUE(map.all_zero());
+  for (int i = 0; i < map.size(); i ++) {
+    map.set_bit(i);
+    for (int j = 0; j < i; j ++) {
+      int n = map.find_next_set_bit(j);
+      if (j <= i) {
+        EXPECT_EQ(n, i);
+      } else {
+        EXPECT_EQ(n, -1);
+      }
+    }
+    map.clr_bit(i);
+  }
+}
+
+TEST_VM(metaspace, leftoverbins_basics) {
+
+  LeftOverManager lom;
+  MetaWord tmp[1024];
+  metaspace::block_stats_t stats;
+
+  lom.add_block(tmp, 1024);
+  DEBUG_ONLY(lom.verify();)
+
+  lom.statistics(&stats);
+  EXPECT_EQ(stats.num_blocks, 1);
+  EXPECT_EQ(stats.word_size, (size_t)1024);
+
+  MetaWord* p = lom.get_block(1024);
+  EXPECT_EQ(p, tmp);
+  DEBUG_ONLY(lom.verify();)
+
+  lom.statistics(&stats);
+  EXPECT_EQ(stats.num_blocks, 0);
+  EXPECT_EQ(stats.word_size, (size_t)0);
+}
+
+TEST_VM(metaspace, leftoverbins_small) {
+  LeftOverBinsTest::test_small_allocations();
+}
+
+TEST_VM(metaspace, leftoverbins_medium) {
+  LeftOverBinsTest::test_medium_allocations();
+}
+
+TEST_VM(metaspace, leftoverbins_large) {
+  LeftOverBinsTest::test_large_allocations();
+}
+
--- a/test/hotspot/gtest/metaspace/test_spacemanager.cpp	Tue Nov 19 20:01:05 2019 +0100
+++ b/test/hotspot/gtest/metaspace/test_spacemanager.cpp	Fri Nov 01 10:28:15 2019 +0100
@@ -26,7 +26,7 @@
 
 #include "precompiled.hpp"
 
-#define LOG_PLEASE
+//#define LOG_PLEASE
 
 #include "metaspace/metaspaceTestsCommon.hpp"