Merge stuefe-new-metaspace-branch
author stuefe
Wed, 16 Oct 2019 17:41:03 +0200
branch stuefe-new-metaspace-branch
changeset 58651 a2d3074db7a9
parent 58646 bcdba1c9f1fe (diff)
parent 58650 d068b1e534de (current diff)
child 58683 2d5dd194c65c
Merge
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -59,6 +59,8 @@
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -73,6 +75,8 @@
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 
+using metaspace::ClassLoaderMetaspace;
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 void ClassLoaderData::init_null_class_loader_data() {
@@ -759,13 +763,13 @@
     if ((metaspace = _metaspace) == NULL) {
       if (this == the_null_class_loader_data()) {
         assert (class_loader() == NULL, "Must be");
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, metaspace::BootMetaspaceType);
       } else if (is_unsafe_anonymous()) {
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, metaspace::UnsafeAnonymousMetaspaceType);
       } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, metaspace::ReflectionMetaspaceType);
       } else {
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
       OrderAccess::release_store(&_metaspace, metaspace);
@@ -956,9 +960,11 @@
   guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
 
   // Verify the integrity of the allocated space.
+#ifdef ASSERT
   if (metaspace_or_null() != NULL) {
     metaspace_or_null()->verify();
   }
+#endif
 
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     guarantee(k->class_loader_data() == this, "Must be the same");
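
Aside on the first hunk above: ClassLoaderData::metaspace_non_null() uses the classic double-checked publication pattern: read _metaspace without the lock, take _metaspace_lock, re-check, construct, then publish with a release store so that racing readers never see a partially constructed ClassLoaderMetaspace. A minimal sketch (the pairing load_acquire on the reader side is an assumption for illustration; metaspace_or_null() as declared in the header reads _metaspace plainly):

    // Writer, under _metaspace_lock (cf. the hunk above):
    if ((metaspace = _metaspace) == NULL) {
      metaspace = new ClassLoaderMetaspace(_metaspace_lock, metaspace::StandardMetaspaceType);
      OrderAccess::release_store(&_metaspace, metaspace);  // publish fully constructed object
    }

    // Reader, lock-free (pairing acquire assumed):
    metaspace::ClassLoaderMetaspace* ms = OrderAccess::load_acquire(&_metaspace);
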
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -27,7 +27,6 @@
 
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
-#include "memory/metaspace.hpp"
 #include "oops/oopHandle.hpp"
 #include "oops/weakHandle.hpp"
 #include "runtime/atomic.hpp"
@@ -63,6 +62,10 @@
 class DictionaryEntry;
 class Dictionary;
 
+namespace metaspace {
+  class ClassLoaderMetaspace;
+}
+
 // ClassLoaderData class
 
 class ClassLoaderData : public CHeapObj<mtClass> {
@@ -113,7 +116,7 @@
   OopHandle _class_loader;    // The instance of java/lang/ClassLoader associated with
                               // this ClassLoaderData
 
-  ClassLoaderMetaspace * volatile _metaspace;  // Meta-space where meta-data defined by the
+  metaspace::ClassLoaderMetaspace* volatile _metaspace;  // Meta-space where meta-data defined by the
                                     // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
@@ -223,7 +226,7 @@
   bool is_alive() const;
 
   // Accessors
-  ClassLoaderMetaspace* metaspace_or_null() const { return _metaspace; }
+  metaspace::ClassLoaderMetaspace* metaspace_or_null() const { return _metaspace; }
 
   static ClassLoaderData* the_null_class_loader_data() {
     return _the_null_class_loader_data;
@@ -256,7 +259,7 @@
 
   // The Metaspace is created lazily so may be NULL.  This
   // method will allocate a Metaspace if needed.
-  ClassLoaderMetaspace* metaspace_non_null();
+  metaspace::ClassLoaderMetaspace* metaspace_non_null();
 
   inline oop class_loader() const;
 
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -690,24 +690,6 @@
   return NULL;
 }
 
-ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  _data = ClassLoaderDataGraph::_head;
-}
-
-ClassLoaderDataGraphMetaspaceIterator::~ClassLoaderDataGraphMetaspaceIterator() {}
-
-ClassLoaderMetaspace* ClassLoaderDataGraphMetaspaceIterator::get_next() {
-  assert(_data != NULL, "Should not be NULL in call to the iterator");
-  ClassLoaderMetaspace* result = _data->metaspace_or_null();
-  _data = _data->next();
-  // This result might be NULL for class loaders without metaspace
-  // yet.  It would be nice to return only non-null results but
-  // there is no guarantee that there will be a non-null result
-  // down the list so the caller is going to have to check.
-  return result;
-}
-
 #ifndef PRODUCT
 // callable from debugger
 extern "C" int print_loader_data_graph() {
--- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -177,12 +177,4 @@
   static Klass* next_klass_in_cldg(Klass* klass);
 };
 
-class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
-  ClassLoaderData* _data;
- public:
-  ClassLoaderDataGraphMetaspaceIterator();
-  ~ClassLoaderDataGraphMetaspaceIterator();
-  bool repeat() { return _data != NULL; }
-  ClassLoaderMetaspace* get_next();
-};
 #endif // SHARE_CLASSFILE_CLASSLOADERDATAGRAPH_HPP
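
The iterator removed above was the CLDG-walking path for summing metaspace usage (its last consumer, MetaspaceUtils::collect_statistics, is removed in metaspace.cpp below). With the new metaspace, totals are maintained eagerly by metaspace::RunningCounters, so no safepoint walk is needed. A sketch of old versus new, with names taken from this changeset:

    // Old: walk every ClassLoaderData at a safepoint.
    ClassLoaderDataGraphMetaspaceIterator iter;
    while (iter.repeat()) {
      ClassLoaderMetaspace* msp = iter.get_next();  // may be NULL
      if (msp != NULL) {
        // ... accumulate per-loader statistics ...
      }
    }

    // New: read the eagerly maintained global counters.
    const size_t used_words      = metaspace::RunningCounters::used_words();
    const size_t committed_words = metaspace::RunningCounters::committed_words();
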
--- a/src/hotspot/share/classfile/classLoaderStats.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderStats.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderDataGraph.hpp"
 #include "classfile/classLoaderStats.hpp"
@@ -78,7 +79,7 @@
   }
   _total_classes += csc._num_classes;
 
-  ClassLoaderMetaspace* ms = cld->metaspace_or_null();
+  metaspace::ClassLoaderMetaspace* ms = cld->metaspace_or_null();
   if (ms != NULL) {
     if(cld->is_unsafe_anonymous()) {
       cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1049,7 +1049,7 @@
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
-  MetaspaceUtils::verify_metrics();
+  DEBUG_ONLY(MetaspaceUtils::verify(false);)
 
   // Prepare heap for normal collections.
   assert(num_free_regions() == 0, "we should not have added any free regions");
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -251,7 +251,7 @@
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
     ClassLoaderDataGraph::purge();
-    MetaspaceUtils::verify_metrics();
+    DEBUG_ONLY(MetaspaceUtils::verify(false);)
 
     BiasedLocking::restore_marks();
     heap->prune_scavengable_nmethods();
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1059,7 +1059,7 @@
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
-  MetaspaceUtils::verify_metrics();
+  DEBUG_ONLY(MetaspaceUtils::verify(false);)
 
   heap->prune_scavengable_nmethods();
 
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -37,6 +37,7 @@
 #include "gc/shared/memAllocator.hpp"
 #include "logging/log.hpp"
 #include "memory/metaspace.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/instanceMirrorKlass.hpp"
@@ -105,18 +106,18 @@
       MetaspaceUtils::used_bytes(),
       MetaspaceUtils::reserved_bytes());
   const MetaspaceSizes data_space(
-      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
-      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
-      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
+      MetaspaceUtils::committed_bytes(metaspace::NonClassType),
+      MetaspaceUtils::used_bytes(metaspace::NonClassType),
+      MetaspaceUtils::reserved_bytes(metaspace::NonClassType));
   const MetaspaceSizes class_space(
-      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
-      MetaspaceUtils::used_bytes(Metaspace::ClassType),
-      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));
+      MetaspaceUtils::committed_bytes(metaspace::ClassType),
+      MetaspaceUtils::used_bytes(metaspace::ClassType),
+      MetaspaceUtils::reserved_bytes(metaspace::ClassType));
 
   const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
-    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
+    MetaspaceUtils::chunk_free_list_summary(metaspace::NonClassType);
   const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
-    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
+    MetaspaceUtils::chunk_free_list_summary(metaspace::ClassType);
 
   return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                           ms_chunk_free_list_summary, class_chunk_free_list_summary);
@@ -255,7 +256,7 @@
 
 MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t word_size,
-                                                            Metaspace::MetadataType mdtype) {
+                                                            metaspace::MetadataType mdtype) {
   uint loop_count = 0;
   uint gc_count = 0;
   uint full_gc_count = 0;
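
The recurring Metaspace::MetadataType -> metaspace::MetadataType substitution in this and the following files reflects the enum moving out of the Metaspace class into the metaspace namespace. A sketch of the assumed shape of the relocated declarations (the actual ones live in memory/metaspace/metaspaceEnums.hpp; enumerator and helper names are taken from their uses elsewhere in this changeset, the rest is assumption):

    namespace metaspace {
      enum MetadataType {
        ClassType,          // compressed class space
        NonClassType,       // non-class metaspace
        MetadataTypeCount
      };
      const char* describe_mdtype(MetadataType md);  // used in jfrType.cpp below
      bool is_class(MetadataType md);                // used in metaspace.cpp below
    }
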
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -29,6 +29,7 @@
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/allocation.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/safepoint.hpp"
@@ -357,7 +358,7 @@
 
   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                        size_t size,
-                                                       Metaspace::MetadataType mdtype);
+                                                       metaspace::MetadataType mdtype);
 
   // Returns "true" iff there is a stop-world GC in progress.  (I assume
   // that it should answer "false" for the concurrent part of a concurrent
--- a/src/hotspot/share/gc/shared/gcTrace.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -30,6 +30,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/objectCountEventSender.hpp"
 #include "gc/shared/referenceProcessorStats.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
@@ -116,9 +117,9 @@
 void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
   send_meta_space_summary_event(when, summary);
 
-  send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
+  send_metaspace_chunk_free_list_summary(when, metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
   if (UseCompressedClassPointers) {
-    send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
+    send_metaspace_chunk_free_list_summary(when, metaspace::ClassType, summary.class_chunk_free_list_summary());
   }
 }
 
--- a/src/hotspot/share/gc/shared/gcTrace.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -30,7 +30,7 @@
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcName.hpp"
 #include "gc/shared/gcWhen.hpp"
-#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/referenceType.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
@@ -112,7 +112,7 @@
   void send_garbage_collection_event() const;
   void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
   void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const;
-  void send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype, const MetaspaceChunkFreeListSummary& summary) const;
+  void send_metaspace_chunk_free_list_summary(GCWhen::Type when, metaspace::MetadataType mdtype, const MetaspaceChunkFreeListSummary& summary) const;
   void send_reference_stats_event(ReferenceType type, size_t count) const;
   void send_phase_events(TimePartitions* time_partitions) const;
 };
--- a/src/hotspot/share/gc/shared/gcTraceSend.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -60,7 +60,7 @@
   }
 }
 
-void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
+void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, metaspace::MetadataType mdtype,
                                                       const MetaspaceChunkFreeListSummary& summary) const {
   EventMetaspaceChunkFreeListSummary e;
   if (e.should_commit()) {
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -32,6 +32,8 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.hpp"
 #include "runtime/handles.inline.hpp"
@@ -180,7 +182,7 @@
 
 VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                  size_t size,
-                                                                 Metaspace::MetadataType mdtype,
+                                                                 metaspace::MetadataType mdtype,
                                                                  uint gc_count_before,
                                                                  uint full_gc_count_before,
                                                                  GCCause::Cause gc_cause)
--- a/src/hotspot/share/gc/shared/gcVMOperations.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -28,6 +28,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "memory/heapInspection.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
@@ -206,13 +207,13 @@
  private:
   MetaWord*                _result;
   size_t                   _size;     // size of object to be allocated
-  Metaspace::MetadataType  _mdtype;
+  metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
 
  public:
   VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                   size_t size,
-                                  Metaspace::MetadataType mdtype,
+                                  metaspace::MetadataType mdtype,
                                   uint gc_count_before,
                                   uint full_gc_count_before,
                                   GCCause::Cause gc_cause);
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -56,6 +56,7 @@
 #include "gc/shared/workgroup.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceCounters.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
@@ -686,7 +687,7 @@
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
     ClassLoaderDataGraph::purge();
-    MetaspaceUtils::verify_metrics();
+    DEBUG_ONLY(MetaspaceUtils::verify(false);)
     // Resize the metaspace capacity after full collections
     MetaspaceGC::compute_new_size();
     update_full_collections_completed();
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -69,7 +69,9 @@
 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
 #endif
 
-#include "memory/metaspace.hpp"
+
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -880,7 +882,7 @@
 
 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                              size_t size,
-                                                             Metaspace::MetadataType mdtype) {
+                                                             metaspace::MetadataType mdtype) {
   MetaWord* result;
 
   // Inform metaspace OOM to GC heuristics if class unloading is possible.
@@ -2016,7 +2018,6 @@
   }
   // Resize and verify metaspace
   MetaspaceGC::compute_new_size();
-  MetaspaceUtils::verify_metrics();
 }
 
 // Process leftover weak oops: update them, if needed or assert they do not
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -32,6 +32,7 @@
 #include "gc/shenandoah/shenandoahLock.hpp"
 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "services/memoryManager.hpp"
 
 class ConcurrentGCTimer;
@@ -588,7 +589,7 @@
   HeapWord* mem_allocate(size_t size, bool* what);
   MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                size_t size,
-                                               Metaspace::MetadataType mdtype);
+                                               metaspace::MetadataType mdtype);
 
   void notify_mutator_alloc_words(size_t words, bool waste);
 
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -33,6 +33,8 @@
 #include "gc/z/zServiceability.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUtils.inline.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/universe.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/align.hpp"
@@ -145,7 +147,7 @@
 
 MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                              size_t size,
-                                                             Metaspace::MetadataType mdtype) {
+                                                             metaspace::MetadataType mdtype) {
   MetaWord* result;
 
   // Start asynchronous GC
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -34,6 +34,7 @@
 #include "gc/z/zRuntimeWorkers.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUncommitter.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 
 class ZCollectedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -79,7 +80,7 @@
   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                        size_t size,
-                                                       Metaspace::MetadataType mdtype);
+                                                       metaspace::MetadataType mdtype);
   virtual void collect(GCCause::Cause cause);
   virtual void collect_as_vm_thread(GCCause::Cause cause);
   virtual void do_full_collection(bool clear_all_soft_refs);
--- a/src/hotspot/share/gc/z/zUnload.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/gc/z/zUnload.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -177,5 +177,5 @@
 void ZUnload::finish() {
   // Resize and verify metaspace
   MetaspaceGC::compute_new_size();
-  MetaspaceUtils::verify_metrics();
+  DEBUG_ONLY(MetaspaceUtils::verify(false);)
 }
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -39,7 +39,8 @@
 #include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
 #include "jfr/support/jfrThreadLocal.hpp"
 #include "jfr/writers/jfrJavaEventWriter.hpp"
-#include "memory/metaspaceGCThresholdUpdater.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
@@ -193,11 +194,11 @@
 }
 
 void MetadataTypeConstant::serialize(JfrCheckpointWriter& writer) {
-  static const u4 nof_entries = Metaspace::MetadataTypeCount;
+  static const u4 nof_entries = metaspace::MetadataTypeCount;
   writer.write_count(nof_entries);
   for (u4 i = 0; i < nof_entries; ++i) {
     writer.write_key(i);
-    writer.write(Metaspace::metadata_type_name((Metaspace::MetadataType)i));
+    writer.write(metaspace::describe_mdtype((metaspace::MetadataType)i));
   }
 }
 
--- a/src/hotspot/share/memory/filemap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -39,6 +39,7 @@
 #include "memory/heapShared.inline.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspace.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
@@ -1382,7 +1383,7 @@
   assert(!HeapShared::is_heap_region(i), "sanity");
   FileMapRegion* si = space_at(i);
   size_t used = si->used();
-  size_t alignment = os::vm_allocation_granularity();
+  size_t alignment = Metaspace::reserve_alignment();
   size_t size = align_up(used, alignment);
   char *requested_addr = region_addr(i);
 
--- a/src/hotspot/share/memory/metadataFactory.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metadataFactory.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -26,6 +26,8 @@
 #define SHARE_MEMORY_METADATAFACTORY_HPP
 
 #include "classfile/classLoaderData.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "oops/array.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/memory/metaspace.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -22,65 +22,204 @@
  *
  */
 
 #include "precompiled.hpp"
+
 #include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspace.hpp"
-#include "memory/metaspace/chunkManager.hpp"
-#include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/metaspaceCommon.hpp"
-#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
-#include "memory/metaspace/spaceManager.hpp"
-#include "memory/metaspace/virtualSpaceList.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/metaspaceTracer.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+#include "memory/metaspace/metaspaceReport.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
+#include "memory/metaspace/runningCounters.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
 #include "runtime/init.hpp"
+#include "runtime/java.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/formatBuffer.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/vmError.hpp"
+
+
+using metaspace::ChunkManager;
+using metaspace::ClassLoaderMetaspace;
+using metaspace::CommitLimiter;
+using metaspace::MetaspaceType;
+using metaspace::MetadataType;
+using metaspace::MetaspaceReporter;
+using metaspace::RunningCounters;
+using metaspace::VirtualSpaceList;
+
+
+// Used by MetaspaceCounters
+size_t MetaspaceUtils::free_chunks_total_words(MetadataType mdtype) {
+  return is_class(mdtype) ? RunningCounters::free_chunks_words_class() : RunningCounters::free_chunks_words_nonclass();
+}
+
+size_t MetaspaceUtils::used_words() {
+  return RunningCounters::used_words();
+}
+
+size_t MetaspaceUtils::used_words(MetadataType mdtype) {
+  return is_class(mdtype) ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
+}
+
+size_t MetaspaceUtils::reserved_words() {
+  return RunningCounters::reserved_words();
+}
+
+size_t MetaspaceUtils::reserved_words(MetadataType mdtype) {
+  return is_class(mdtype) ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
+}
+
+size_t MetaspaceUtils::committed_words() {
+  return RunningCounters::committed_words();
+}
+
+size_t MetaspaceUtils::committed_words(MetadataType mdtype) {
+  return is_class(mdtype) ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
+}
+
 
 
-using namespace metaspace;
+void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
+  const metaspace::MetaspaceSizesSnapshot meta_values;
 
-MetaWord* last_allocated = 0;
+  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
+  // - used tells you how much memory is actually used for metadata
+  // - committed tells you how much memory is committed for the purpose of metadata
+  // The difference between the two is waste, which can take various forms (freelists,
+  //   unused parts of committed chunks, etc.).
+  //
+  // Reserved is left out since it is not as interesting as the first two values: for class space
+  // it is a constant (and, to uninformed users, often confusingly large). For non-class space it
+  // would be interesting, since free chunks can be uncommitted, but for now it is left out.
 
-size_t Metaspace::_compressed_class_space_size;
-const MetaspaceTracer* Metaspace::_tracer = NULL;
+  if (Metaspace::using_class_space()) {
+    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
+                            HEAP_CHANGE_FORMAT" "
+                            HEAP_CHANGE_FORMAT,
+                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
+                                                    pre_meta_values.used(),
+                                                    pre_meta_values.committed(),
+                                                    meta_values.used(),
+                                                    meta_values.committed()),
+                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
+                                                    pre_meta_values.non_class_used(),
+                                                    pre_meta_values.non_class_committed(),
+                                                    meta_values.non_class_used(),
+                                                    meta_values.non_class_committed()),
+                            HEAP_CHANGE_FORMAT_ARGS("Class",
+                                                    pre_meta_values.class_used(),
+                                                    pre_meta_values.class_committed(),
+                                                    meta_values.class_used(),
+                                                    meta_values.class_committed()));
+  } else {
+    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
+                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
+                                                    pre_meta_values.used(),
+                                                    pre_meta_values.committed(),
+                                                    meta_values.used(),
+                                                    meta_values.committed()));
+  }
+}
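+
+// Hedged usage sketch, not taken verbatim from this changeset: the GCs above
+// (see genCollectedHeap.cpp, which now includes metaspaceSizesSnapshot.hpp)
+// pair this with a snapshot taken before the collection. Assumed call pattern:
+//   const metaspace::MetaspaceSizesSnapshot pre_gc_values;  // capture used/committed
+//   ... collect, unload classes, ClassLoaderDataGraph::purge() ...
+//   MetaspaceUtils::print_metaspace_change(pre_gc_values);  // log before -> after deltas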
 
-DEBUG_ONLY(bool Metaspace::_frozen = false;)
+
+// Prints an ASCII representation of the given space.
+void MetaspaceUtils::print_metaspace_map(outputStream* out, MetadataType mdtype) {
+  out->print_cr("-- not yet implemented ---");
+}
+
+// This will print out a basic metaspace usage report but
+// unlike print_report() is guaranteed not to lock or to walk the CLDG.
+void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
+  MetaspaceReporter::print_basic_report(out, scale);
+}
 
-static const char* space_type_name(Metaspace::MetaspaceType t) {
-  const char* s = NULL;
-  switch (t) {
-    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
-    case Metaspace::BootMetaspaceType: s = "Boot"; break;
-    case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
-    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
-    default: ShouldNotReachHere();
+// Prints a report about the current metaspace state.
+// Optional parts can be enabled via flags.
+// Function will walk the CLDG and will lock the expand lock; if that is not
+// convenient, use print_basic_report() instead.
+void MetaspaceUtils::print_full_report(outputStream* out, size_t scale) {
+  const int flags =
+      MetaspaceReporter::rf_show_loaders |
+      MetaspaceReporter::rf_break_down_by_chunktype |
+      MetaspaceReporter::rf_show_classes;
+  MetaspaceReporter::print_report(out, scale, flags);
+}
+
+void MetaspaceUtils::print_on(outputStream* out) {
+
+  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
+
+  out->print_cr(" Metaspace       "
+                "used "      SIZE_FORMAT "K, "
+                "committed " SIZE_FORMAT "K, "
+                "reserved "  SIZE_FORMAT "K",
+                used_bytes()/K,
+                committed_bytes()/K,
+                reserved_bytes()/K);
+
+  if (Metaspace::using_class_space()) {
+    const MetadataType ct = metaspace::ClassType;
+    out->print_cr("  class space    "
+                  "used "      SIZE_FORMAT "K, "
+                  "committed " SIZE_FORMAT "K, "
+                  "reserved "  SIZE_FORMAT "K",
+                  used_bytes(ct)/K,
+                  committed_bytes(ct)/K,
+                  reserved_bytes(ct)/K);
   }
-  return s;
 }
 
+#ifdef ASSERT
+void MetaspaceUtils::verify(bool slow) {
+  if (Metaspace::initialized()) {
+
+    // Verify non-class chunkmanager...
+    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
+    cm->verify(slow);
+
+    // ... and space list.
+    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
+    vsl->verify(slow);
+
+    if (Metaspace::using_class_space()) {
+      // If we use compressed class pointers, verify class chunkmanager...
+      cm = ChunkManager::chunkmanager_class();
+      assert(cm != NULL, "Sanity");
+      cm->verify(slow);
+
+      // ... and class spacelist.
+      vsl = VirtualSpaceList::vslist_class();
+      assert(vsl != NULL, "Sanity");
+      vsl->verify(slow);
+    }
+
+  }
+}
+#endif
+
+/////////////////////////////////
+// MetaspaceGC methods
+
 volatile size_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
-// BlockFreelist methods
-
-// VirtualSpaceNode methods
-
-// MetaspaceGC methods
-
 // VM_CollectForMetadataAllocation is the vm operation used to GC.
 // Within the VM operation after the GC the attempt to allocate the metadata
 // should succeed.  If the GC did not free enough space for the metaspace
@@ -198,7 +337,7 @@
 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   // Check if the compressed class space is full.
   if (is_class && Metaspace::using_class_space()) {
-    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
+    size_t class_committed = MetaspaceUtils::committed_bytes(metaspace::ClassType);
     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
@@ -352,626 +491,21 @@
   }
 }
 
-// MetaspaceUtils
-size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
-size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
-volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
 
-// Collect used metaspace statistics. This involves walking the CLDG. The resulting
-// output will be the accumulated values for all live metaspaces.
-// Note: method does not do any locking.
-void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
-  out->reset();
-  ClassLoaderDataGraphMetaspaceIterator iter;
-   while (iter.repeat()) {
-     ClassLoaderMetaspace* msp = iter.get_next();
-     if (msp != NULL) {
-       msp->add_to_statistics(out);
-     }
-   }
-}
-
-size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->free_bytes();
-}
-
-size_t MetaspaceUtils::free_in_vs_bytes() {
-  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
-}
-
-static void inc_stat_nonatomically(size_t* pstat, size_t words) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  (*pstat) += words;
-}
-
-static void dec_stat_nonatomically(size_t* pstat, size_t words) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  const size_t size_now = *pstat;
-  assert(size_now >= words, "About to decrement counter below zero "
-         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
-         size_now, words);
-  *pstat = size_now - words;
-}
-
-static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
-  Atomic::add(words, pstat);
-}
-
-static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
-  const size_t size_now = *pstat;
-  assert(size_now >= words, "About to decrement counter below zero "
-         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
-         size_now, words);
-  Atomic::sub(words, pstat);
-}
-
-void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  dec_stat_nonatomically(&_capacity_words[mdtype], words);
-}
-void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  inc_stat_nonatomically(&_capacity_words[mdtype], words);
-}
-void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
-  dec_stat_atomically(&_used_words[mdtype], words);
-}
-void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
-  inc_stat_atomically(&_used_words[mdtype], words);
-}
-void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
-  dec_stat_nonatomically(&_overhead_words[mdtype], words);
-}
-void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
-  inc_stat_nonatomically(&_overhead_words[mdtype], words);
-}
-
-size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->reserved_bytes();
-}
-
-size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->committed_bytes();
-}
-
-size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
 
-size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
-  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
-  if (chunk_manager == NULL) {
-    return 0;
-  }
-  return chunk_manager->free_chunks_total_words();
-}
-
-size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
-  return free_chunks_total_words(mdtype) * BytesPerWord;
-}
-
-size_t MetaspaceUtils::free_chunks_total_words() {
-  return free_chunks_total_words(Metaspace::ClassType) +
-         free_chunks_total_words(Metaspace::NonClassType);
-}
-
-size_t MetaspaceUtils::free_chunks_total_bytes() {
-  return free_chunks_total_words() * BytesPerWord;
-}
-
-bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
-  return Metaspace::get_chunk_manager(mdtype) != NULL;
-}
-
-MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
-  if (!has_chunk_free_list(mdtype)) {
-    return MetaspaceChunkFreeListSummary();
-  }
-
-  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
-  return cm->chunk_free_list_summary();
-}
-
-void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
-  const metaspace::MetaspaceSizesSnapshot meta_values;
-
-  if (Metaspace::using_class_space()) {
-    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
-                            HEAP_CHANGE_FORMAT" "
-                            HEAP_CHANGE_FORMAT,
-                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
-                                                    pre_meta_values.used(),
-                                                    pre_meta_values.committed(),
-                                                    meta_values.used(),
-                                                    meta_values.committed()),
-                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
-                                                    pre_meta_values.non_class_used(),
-                                                    pre_meta_values.non_class_committed(),
-                                                    meta_values.non_class_used(),
-                                                    meta_values.non_class_committed()),
-                            HEAP_CHANGE_FORMAT_ARGS("Class",
-                                                    pre_meta_values.class_used(),
-                                                    pre_meta_values.class_committed(),
-                                                    meta_values.class_used(),
-                                                    meta_values.class_committed()));
-  } else {
-    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
-                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
-                                                    pre_meta_values.used(),
-                                                    pre_meta_values.committed(),
-                                                    meta_values.used(),
-                                                    meta_values.committed()));
-  }
-}
-
-void MetaspaceUtils::print_on(outputStream* out) {
-  Metaspace::MetadataType nct = Metaspace::NonClassType;
-
-  out->print_cr(" Metaspace       "
-                "used "      SIZE_FORMAT "K, "
-                "capacity "  SIZE_FORMAT "K, "
-                "committed " SIZE_FORMAT "K, "
-                "reserved "  SIZE_FORMAT "K",
-                used_bytes()/K,
-                capacity_bytes()/K,
-                committed_bytes()/K,
-                reserved_bytes()/K);
-
-  if (Metaspace::using_class_space()) {
-    Metaspace::MetadataType ct = Metaspace::ClassType;
-    out->print_cr("  class space    "
-                  "used "      SIZE_FORMAT "K, "
-                  "capacity "  SIZE_FORMAT "K, "
-                  "committed " SIZE_FORMAT "K, "
-                  "reserved "  SIZE_FORMAT "K",
-                  used_bytes(ct)/K,
-                  capacity_bytes(ct)/K,
-                  committed_bytes(ct)/K,
-                  reserved_bytes(ct)/K);
-  }
-}
+////// Metaspace methods //////
 
 
-void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
-  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
-  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
-  {
-    if (Metaspace::using_class_space()) {
-      out->print("  Non-class space:  ");
-    }
-    print_scaled_words(out, reserved_nonclass_words, scale, 7);
-    out->print(" reserved, ");
-    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
-    out->print_cr(" committed ");
 
-    if (Metaspace::using_class_space()) {
-      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
-      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
-      out->print("      Class space:  ");
-      print_scaled_words(out, reserved_class_words, scale, 7);
-      out->print(" reserved, ");
-      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
-      out->print_cr(" committed ");
-
-      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
-      const size_t committed_words = committed_nonclass_words + committed_class_words;
-      out->print("             Both:  ");
-      print_scaled_words(out, reserved_words, scale, 7);
-      out->print(" reserved, ");
-      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
-      out->print_cr(" committed ");
-    }
-  }
-}
-
-static void print_basic_switches(outputStream* out, size_t scale) {
-  out->print("MaxMetaspaceSize: ");
-  if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
-    // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
-    // value is smaller.
-    out->print("unlimited");
-  } else {
-    print_human_readable_size(out, MaxMetaspaceSize, scale);
-  }
-  out->cr();
-  if (Metaspace::using_class_space()) {
-    out->print("CompressedClassSpaceSize: ");
-    print_human_readable_size(out, CompressedClassSpaceSize, scale);
-  }
-  out->cr();
-}
-
-// This will print out a basic metaspace usage report but
-// unlike print_report() is guaranteed not to lock or to walk the CLDG.
-void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
-
-  if (!Metaspace::initialized()) {
-    out->print_cr("Metaspace not yet initialized.");
-    return;
-  }
-
-  out->cr();
-  out->print_cr("Usage:");
-
-  if (Metaspace::using_class_space()) {
-    out->print("  Non-class:  ");
-  }
-
-  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
-  // MetaspaceUtils.
-  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
-  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
-  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
-  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
-
-  print_scaled_words(out, cap_nc, scale, 5);
-  out->print(" capacity, ");
-  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
-  out->print(" used, ");
-  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
-  out->print(" free+waste, ");
-  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
-  out->print(" overhead. ");
-  out->cr();
-
-  if (Metaspace::using_class_space()) {
-    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
-    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
-    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
-    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
-    out->print("      Class:  ");
-    print_scaled_words(out, cap_c, scale, 5);
-    out->print(" capacity, ");
-    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
-    out->print(" used, ");
-    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
-    out->print(" free+waste, ");
-    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
-    out->print(" overhead. ");
-    out->cr();
-
-    out->print("       Both:  ");
-    const size_t cap = cap_nc + cap_c;
-
-    print_scaled_words(out, cap, scale, 5);
-    out->print(" capacity, ");
-    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
-    out->print(" used, ");
-    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
-    out->print(" free+waste, ");
-    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
-    out->print(" overhead. ");
-    out->cr();
-  }
-
-  out->cr();
-  out->print_cr("Virtual space:");
-
-  print_vs(out, scale);
-
-  out->cr();
-  out->print_cr("Chunk freelists:");
-
-  if (Metaspace::using_class_space()) {
-    out->print("   Non-Class:  ");
-  }
-  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
-  out->cr();
-  if (Metaspace::using_class_space()) {
-    out->print("       Class:  ");
-    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale);
-    out->cr();
-    out->print("        Both:  ");
-    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() +
-                              Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
-    out->cr();
-  }
-
-  out->cr();
-
-  // Print basic settings
-  print_basic_switches(out, scale);
-
-  out->cr();
-
-}
-
-void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
-
-  if (!Metaspace::initialized()) {
-    out->print_cr("Metaspace not yet initialized.");
-    return;
-  }
-
-  const bool print_loaders = (flags & rf_show_loaders) > 0;
-  const bool print_classes = (flags & rf_show_classes) > 0;
-  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
-  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
-
-  // Some report options require walking the class loader data graph.
-  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
-  if (print_loaders) {
-    out->cr();
-    out->print_cr("Usage per loader:");
-    out->cr();
-  }
-
-  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print
-
-  // Print totals, broken up by space type.
-  if (print_by_spacetype) {
-    out->cr();
-    out->print_cr("Usage per space type:");
-    out->cr();
-    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
-         space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
-    {
-      uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
-      uintx num_classes = cl._num_classes_by_spacetype[space_type];
-      out->print("%s - " UINTX_FORMAT " %s",
-        space_type_name((Metaspace::MetaspaceType)space_type),
-        num_loaders, loaders_plural(num_loaders));
-      if (num_classes > 0) {
-        out->print(", ");
-        print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
-        out->print(":");
-        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
-      } else {
-        out->print(".");
-        out->cr();
-      }
-      out->cr();
-    }
-  }
-
-  // Print totals for in-use data:
-  out->cr();
-  {
-    uintx num_loaders = cl._num_loaders;
-    out->print("Total Usage - " UINTX_FORMAT " %s, ",
-      num_loaders, loaders_plural(num_loaders));
-    print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
-    out->print(":");
-    cl._stats_total.print_on(out, scale, print_by_chunktype);
-    out->cr();
-  }
-
-  // -- Print Virtual space.
-  out->cr();
-  out->print_cr("Virtual space:");
-
-  print_vs(out, scale);
-
-  // -- Print VirtualSpaceList details.
-  if ((flags & rf_show_vslist) > 0) {
-    out->cr();
-    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
-
-    if (Metaspace::using_class_space()) {
-      out->print_cr("   Non-Class:");
-    }
-    Metaspace::space_list()->print_on(out, scale);
-    if (Metaspace::using_class_space()) {
-      out->print_cr("       Class:");
-      Metaspace::class_space_list()->print_on(out, scale);
-    }
-  }
-  out->cr();
-
-  // -- Print VirtualSpaceList map.
-  if ((flags & rf_show_vsmap) > 0) {
-    out->cr();
-    out->print_cr("Virtual space map:");
-
-    if (Metaspace::using_class_space()) {
-      out->print_cr("   Non-Class:");
-    }
-    Metaspace::space_list()->print_map(out);
-    if (Metaspace::using_class_space()) {
-      out->print_cr("       Class:");
-      Metaspace::class_space_list()->print_map(out);
-    }
-  }
-  out->cr();
-
-  // -- Print Freelists (ChunkManager) details
-  out->cr();
-  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
-
-  ChunkManagerStatistics non_class_cm_stat;
-  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
-
-  if (Metaspace::using_class_space()) {
-    out->print_cr("   Non-Class:");
-  }
-  non_class_cm_stat.print_on(out, scale);
-
-  if (Metaspace::using_class_space()) {
-    ChunkManagerStatistics class_cm_stat;
-    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
-    out->print_cr("       Class:");
-    class_cm_stat.print_on(out, scale);
-  }
-
-  // As a convenience, print a summary of common waste.
-  out->cr();
-  out->print("Waste ");
-  // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
-  const size_t committed_words = committed_bytes() / BytesPerWord;
-
-  out->print("(percentages refer to total committed size ");
-  print_scaled_words(out, committed_words, scale);
-  out->print_cr("):");
-
-  // Print space committed but not yet used by any class loader
-  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
-  out->print("              Committed unused: ");
-  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
-  out->cr();
-
-  // Print waste for in-use chunks.
-  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
-  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
-  UsedChunksStatistics ucs_all;
-  ucs_all.add(ucs_nonclass);
-  ucs_all.add(ucs_class);
-
-  out->print("        Waste in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
-  out->cr();
-  out->print("         Free in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
-  out->cr();
-  out->print("     Overhead in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
-  out->cr();
-
-  // Print waste in free chunks.
-  const size_t total_capacity_in_free_chunks =
-      Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
-     (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
-  out->print("                In free chunks: ");
-  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
-  out->cr();
-
-  // Print waste in deallocated blocks.
-  const uintx free_blocks_num =
-      cl._stats_total.nonclass_sm_stats().free_blocks_num() +
-      cl._stats_total.class_sm_stats().free_blocks_num();
-  const size_t free_blocks_cap_words =
-      cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
-      cl._stats_total.class_sm_stats().free_blocks_cap_words();
-  out->print("Deallocated from chunks in use: ");
-  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
-  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
-  out->cr();
-
-  // Print total waste.
-  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
-      + free_blocks_cap_words + unused_words_in_vs;
-  out->print("                       -total-: ");
-  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
-  out->cr();
-
-  // Print internal statistics
-#ifdef ASSERT
-  out->cr();
-  out->cr();
-  out->print_cr("Internal statistics:");
-  out->cr();
-  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
-  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
-  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
-  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
-  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
-  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
-  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
-  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
-  out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunks_added_to_freelist);
-  out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunks_removed_from_freelist);
-  out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits);
-
-  out->cr();
-#endif
-
-  // Print some interesting settings
-  out->cr();
-  out->cr();
-  print_basic_switches(out, scale);
-
-  out->cr();
-  out->print("InitialBootClassLoaderMetaspaceSize: ");
-  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
-
-  out->cr();
-  out->cr();
-
-} // MetaspaceUtils::print_report()
-
-// Prints an ASCII representation of the given space.
-void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  const bool for_class = mdtype == Metaspace::ClassType ? true : false;
-  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
-  if (vsl != NULL) {
-    if (for_class) {
-      if (!Metaspace::using_class_space()) {
-        out->print_cr("No Class Space.");
-        return;
-      }
-      out->print_raw("---- Metaspace Map (Class Space) ----");
-    } else {
-      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
-    }
-    // Print legend:
-    out->cr();
-    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
-    out->cr();
-    VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
-    vsl->print_map(out);
-    out->cr();
-  }
-}
-
-void MetaspaceUtils::verify_free_chunks() {
-#ifdef ASSERT
-  Metaspace::chunk_manager_metadata()->verify(false);
-  if (Metaspace::using_class_space()) {
-    Metaspace::chunk_manager_class()->verify(false);
-  }
-#endif
-}
-
-void MetaspaceUtils::verify_metrics() {
-#ifdef ASSERT
-  // Please note: there are time windows where the internal counters are out of sync with
-  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
-  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
-  // not be counted when iterating the CLDG. So be careful when you call this method.
-  ClassLoaderMetaspaceStatistics total_stat;
-  collect_statistics(&total_stat);
-  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
-  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
-
-  bool mismatch = false;
-  for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
-    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
-    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
-    if (capacity_words(mdtype) != chunk_stat.cap() ||
-        used_words(mdtype) != chunk_stat.used() ||
-        overhead_words(mdtype) != chunk_stat.overhead()) {
-      mismatch = true;
-      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
-      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
-                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
-      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
-                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
-      tty->flush();
-    }
-  }
-  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
-#endif
-}
-
-// Metaspace methods
-
-size_t Metaspace::_first_chunk_word_size = 0;
-size_t Metaspace::_first_class_chunk_word_size = 0;
-
+MetaWord* Metaspace::_compressed_class_space_base = NULL;
+size_t Metaspace::_compressed_class_space_size = 0;
+const MetaspaceTracer* Metaspace::_tracer = NULL;
+bool Metaspace::_initialized = false;
 size_t Metaspace::_commit_alignment = 0;
 size_t Metaspace::_reserve_alignment = 0;
 
-VirtualSpaceList* Metaspace::_space_list = NULL;
-VirtualSpaceList* Metaspace::_class_space_list = NULL;
+DEBUG_ONLY(bool Metaspace::_frozen = false;)
 
-ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
-ChunkManager* Metaspace::_chunk_manager_class = NULL;
-
-bool Metaspace::_initialized = false;
-
-#define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
@@ -1002,6 +536,15 @@
     }
   }
 
+  // We must prevent any metaspace object from being allocated directly at
+  // CompressedKlassPointers::base() - that would translate to a narrow Klass
+  // pointer of 0, which has a special meaning (invalid). (Note: this was
+  // never a problem in the old metaspace, since every chunk was prefixed by
+  // its header, so an allocation at offset 0 in a chunk was impossible.)
+  if (lower_base == metaspace_base) {
+    lower_base -= os::vm_page_size();
+  }
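+  // (Sketch of the encoding this guards against, assuming the usual scheme:
+  //  for a Klass* k, narrow(k) == (k - base) >> shift, so placing lower_base
+  //  one page below any allocatable address guarantees narrow(k) != 0.)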
+
   CompressedKlassPointers::set_base(lower_base);
 
   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
@@ -1047,23 +590,23 @@
   bool large_pages = false;
 
 #if !(defined(AARCH64) || defined(AIX))
-  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
+  ReservedSpace rs = ReservedSpace(compressed_class_space_size(),
                                              _reserve_alignment,
                                              large_pages,
                                              requested_addr);
 #else // AARCH64
-  ReservedSpace metaspace_rs;
+  ReservedSpace rs;
 
   // Our compressed klass pointers may fit nicely into the lower 32
   // bits.
   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
-    metaspace_rs = ReservedSpace(compressed_class_space_size(),
+    rs = ReservedSpace(compressed_class_space_size(),
                                  _reserve_alignment,
                                  large_pages,
                                  requested_addr);
   }
 
-  if (! metaspace_rs.is_reserved()) {
+  if (! rs.is_reserved()) {
     // Aarch64: Try to align metaspace so that we can decode a compressed
     // klass with a single MOVK instruction.  We can do this iff the
     // compressed class base is a multiple of 4G.
@@ -1083,7 +626,7 @@
           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
         // We failed to find an aligned base that will reach.  Fall
         // back to using our requested addr.
-        metaspace_rs = ReservedSpace(compressed_class_space_size(),
+        rs = ReservedSpace(compressed_class_space_size(),
                                      _reserve_alignment,
                                      large_pages,
                                      requested_addr);
@@ -1091,18 +634,18 @@
       }
 #endif
 
-      metaspace_rs = ReservedSpace(compressed_class_space_size(),
+      rs = ReservedSpace(compressed_class_space_size(),
                                    _reserve_alignment,
                                    large_pages,
                                    a);
-      if (metaspace_rs.is_reserved())
+      if (rs.is_reserved())
         break;
     }
   }
 
 #endif // AARCH64
 
-  if (!metaspace_rs.is_reserved()) {
+  if (!rs.is_reserved()) {
 #if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_up(1*G, _reserve_alignment);
@@ -1111,10 +654,10 @@
       // by 1GB each time, until we reach an address that will no longer allow
       // use of CDS with compressed klass pointers.
       char *addr = requested_addr;
-      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+      while (!rs.is_reserved() && (addr + increment > addr) &&
              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
         addr = addr + increment;
-        metaspace_rs = ReservedSpace(compressed_class_space_size(),
+        rs = ReservedSpace(compressed_class_space_size(),
                                      _reserve_alignment, large_pages, addr);
       }
     }
@@ -1124,10 +667,10 @@
     // metaspace as if UseCompressedClassPointers is off because too much
     // initialization has happened that depends on UseCompressedClassPointers.
     // So, UseCompressedClassPointers cannot be turned off at this point.
-    if (!metaspace_rs.is_reserved()) {
-      metaspace_rs = ReservedSpace(compressed_class_space_size(),
+    if (!rs.is_reserved()) {
+      rs = ReservedSpace(compressed_class_space_size(),
                                    _reserve_alignment, large_pages);
-      if (!metaspace_rs.is_reserved()) {
+      if (!rs.is_reserved()) {
         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                               compressed_class_space_size()));
       }
@@ -1135,19 +678,21 @@
   }
 
   // If we got here then the metaspace got allocated.
-  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+  MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
+
+  _compressed_class_space_base = (MetaWord*)rs.base();
 
 #if INCLUDE_CDS
   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
-  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(rs.base(), cds_base)) {
     FileMapInfo::stop_sharing_and_unmap(
         "Could not allocate metaspace at a compatible address");
   }
 #endif
-  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+  set_narrow_klass_base_and_shift((address)rs.base(),
                                   UseSharedSpaces ? (address)cds_base : 0);
 
-  initialize_class_space(metaspace_rs);
+  initialize_class_space(rs);
 
   LogTarget(Trace, gc, metaspace) lt;
   if (lt.is_enabled()) {
@@ -1157,13 +702,29 @@
   }
 }
 
+// For UseCompressedClassPointers the class space is reserved above the top of
+// the Java heap.  The argument passed in is at the base of the compressed space.
+void Metaspace::initialize_class_space(ReservedSpace rs) {
+
+  // The reserved space size may be bigger because of alignment, esp with UseLargePages
+  assert(rs.size() >= CompressedClassSpaceSize,
+         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
+  assert(using_class_space(), "Must be using class space");
+
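+  // The class space is managed as a single VirtualSpaceList holding just this
+  // one reservation, with a dedicated ChunkManager drawing chunks from it.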
+  VirtualSpaceList* vsl = new VirtualSpaceList("class space list", rs, CommitLimiter::globalLimiter());
+  VirtualSpaceList::set_vslist_class(vsl);
+  ChunkManager* cm = new ChunkManager("class space chunk manager", vsl);
+  ChunkManager::set_chunkmanager_class(cm);
+}
+
 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
-  if (_class_space_list != NULL) {
-    address base = (address)_class_space_list->current_virtual_space()->bottom();
+  if (Metaspace::using_class_space()) {
     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
-                 compressed_class_space_size(), p2i(base));
+                 compressed_class_space_size(), p2i(compressed_class_space_base()));
     if (requested_addr != 0) {
       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
     }
@@ -1171,24 +732,13 @@
   }
 }
 
-// For UseCompressedClassPointers the class space is reserved above the top of
-// the Java heap.  The argument passed in is at the base of the compressed space.
-void Metaspace::initialize_class_space(ReservedSpace rs) {
-  // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= CompressedClassSpaceSize,
-         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
-  assert(using_class_space(), "Must be using class space");
-  _class_space_list = new VirtualSpaceList(rs);
-  _chunk_manager_class = new ChunkManager(true/*is_class*/);
-
-  if (!_class_space_list->initialization_succeeded()) {
-    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
-  }
-}
-
 #endif
 
 void Metaspace::ergo_initialize() {
+
+  // Must happen before using any of the metaspace::Settings values.
+  metaspace::Settings::ergo_initialize();
+
   if (DumpSharedSpaces) {
     // Using large pages when dumping the shared archive is currently not implemented.
     FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
@@ -1199,8 +749,15 @@
     page_size = os::large_page_size();
   }
 
-  _commit_alignment  = page_size;
-  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+  // Commit alignment: this is an implementation detail we would rather hide,
+  // but it is needed when calculating the GC threshold.
+  _commit_alignment  = metaspace::Settings::commit_granule_bytes();
+
+  // Reserve alignment: all Metaspace memory mappings are to be aligned to the size of a root chunk.
+  _reserve_alignment = MAX2(page_size, (size_t)metaspace::chklvl::MAX_CHUNK_BYTE_SIZE);
+
+  assert(is_aligned(_reserve_alignment, os::vm_allocation_granularity()),
+         "root chunk size must be a multiple of alloc granularity");
 
   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
   // override if MaxMetaspaceSize was set on the command line or not.
@@ -1225,28 +782,26 @@
 
   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
 
-  // Initial virtual space size will be calculated at global_initialize()
-  size_t min_metaspace_sz =
-      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
+  // Note: InitialBootClassLoaderMetaspaceSize is an old parameter which is used to determine the chunk size
+  // of the first non-class chunk handed to the boot class loader. See metaspace/chunkAllocSequence.hpp.
+  size_t min_metaspace_sz = align_up(InitialBootClassLoaderMetaspaceSize, _reserve_alignment);
   if (UseCompressedClassPointers) {
-    if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
-      if (min_metaspace_sz >= MaxMetaspaceSize) {
-        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
-      } else {
-        FLAG_SET_ERGO(CompressedClassSpaceSize,
-                      MaxMetaspaceSize - min_metaspace_sz);
-      }
+    if (min_metaspace_sz >= MaxMetaspaceSize) {
+      vm_exit_during_initialization("MaxMetaspaceSize is too small.");
+    } else if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
+      FLAG_SET_ERGO(CompressedClassSpaceSize, MaxMetaspaceSize - min_metaspace_sz);
     }
   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
     FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                   min_metaspace_sz);
   }
 
-  set_compressed_class_space_size(CompressedClassSpaceSize);
+  _compressed_class_space_size = CompressedClassSpaceSize;
+
 }
 
 void Metaspace::global_initialize() {
-  MetaspaceGC::initialize();
+  MetaspaceGC::initialize(); // TODO: since we no longer preallocate initial chunks, is this still needed?
 
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
@@ -1262,10 +817,10 @@
   if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
     vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
   }
+#endif // INCLUDE_CDS
 
-  if (!DumpSharedSpaces && !UseSharedSpaces)
-#endif // INCLUDE_CDS
-  {
+  // Initialize class space:
+  if (CDS_ONLY(!DumpSharedSpaces && !UseSharedSpaces) NOT_CDS(true)) {
 #ifdef _LP64
     if (using_class_space()) {
       char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
@@ -1274,27 +829,11 @@
 #endif // _LP64
   }
 
-  // Initialize these before initializing the VirtualSpaceList
-  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
-  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
-  // Make the first class chunk bigger than a medium chunk so it's not put
-  // on the medium chunk list.   The next chunk will be small and progress
-  // from there.  This size calculated by -version.
-  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                     (CompressedClassSpaceSize/BytesPerWord)*2);
-  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
-  // Arbitrarily set the initial virtual space to a multiple
-  // of the boot class loader size.
-  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
-  word_size = align_up(word_size, Metaspace::reserve_alignment_words());
-
-  // Initialize the list of virtual spaces.
-  _space_list = new VirtualSpaceList(word_size);
-  _chunk_manager_metadata = new ChunkManager(false/*metaspace*/);
-
-  if (!_space_list->initialization_succeeded()) {
-    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
-  }
+  // Initialize non-class virtual space list, and its chunk manager:
+  VirtualSpaceList* vsl = new VirtualSpaceList("non-class virtualspacelist", CommitLimiter::globalLimiter());
+  VirtualSpaceList::set_vslist_nonclass(vsl);
+  ChunkManager* cm = new ChunkManager("non-class chunkmanager", vsl);
+  ChunkManager::set_chunkmanager_nonclass(cm);
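+  // Unlike the class space list, which wraps one fixed reservation, this list
+  // grows on demand by reserving additional virtual space nodes.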
 
   _tracer = new MetaspaceTracer();
 
@@ -1306,21 +845,6 @@
   MetaspaceGC::post_initialize();
 }
 
-void Metaspace::verify_global_initialization() {
-  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
-  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
-
-  if (using_class_space()) {
-    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
-    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
-  }
-}
-
-size_t Metaspace::align_word_size_up(size_t word_size) {
-  size_t byte_size = word_size * wordSize;
-  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
-}
-
 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               MetaspaceObj::Type type, TRAPS) {
   assert(!_frozen, "sanity");
@@ -1334,7 +858,7 @@
   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");
 
-  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? metaspace::ClassType : metaspace::NonClassType;
 
   // Try to allocate metadata.
   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
@@ -1367,6 +891,8 @@
   // Zero initialize.
   Copy::fill_to_words((HeapWord*)result, word_size, 0);
 
+  log_trace(metaspace)("Metaspace::allocate: type %d returning " PTR_FORMAT ".", (int)type, p2i(result));
+
   return result;
 }
 
@@ -1377,7 +903,7 @@
   Log(gc, metaspace, freelist, oom) log;
   if (log.is_info()) {
     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
-             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
+             is_class(mdtype) ? "class" : "data", word_size);
     ResourceMark rm;
     if (log.is_debug()) {
       if (loader_data->metaspace_or_null() != NULL) {
@@ -1390,12 +916,17 @@
     MetaspaceUtils::print_basic_report(&ls, 0);
   }
 
+  // Which limit did we hit? CompressedClassSpaceSize or MaxMetaspaceSize?
   bool out_of_compressed_class_space = false;
-  if (is_class_space_allocation(mdtype)) {
+  if (is_class(mdtype)) {
     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
     out_of_compressed_class_space =
-      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
-      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
+      MetaspaceUtils::committed_bytes(metaspace::ClassType) +
+      // TODO: this is just a cheesy approximation and may yield incorrect
+      // results. We need a clean way to remember which limit exactly was hit
+      // during an allocation - some sort of allocation context structure?
+      align_up(word_size * BytesPerWord, 4 * M) >
       CompressedClassSpaceSize;
   }
 
@@ -1422,26 +953,16 @@
   }
 }
 
-const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
-  switch (mdtype) {
-    case Metaspace::ClassType: return "Class";
-    case Metaspace::NonClassType: return "Metadata";
-    default:
-      assert(false, "Got bad mdtype: %d", (int) mdtype);
-      return NULL;
+void Metaspace::purge() {
+  ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
+  if (cm != NULL) {
+    cm->wholesale_reclaim();
   }
-}
-
-void Metaspace::purge(MetadataType mdtype) {
-  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
-}
-
-void Metaspace::purge() {
-  MutexLocker cl(MetaspaceExpand_lock,
-                 Mutex::_no_safepoint_check_flag);
-  purge(NonClassType);
   if (using_class_space()) {
-    purge(ClassType);
+    cm = ChunkManager::chunkmanager_class();
+    if (cm != NULL) {
+      cm->wholesale_reclaim();
+    }
   }
 }
 
@@ -1453,214 +974,9 @@
 }
 
 bool Metaspace::contains_non_shared(const void* ptr) {
-  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
+  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
      return true;
   }
 
-  return get_space_list(NonClassType)->contains(ptr);
-}
-
-// ClassLoaderMetaspace
-
-ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
-  : _space_type(type)
-  , _lock(lock)
-  , _vsm(NULL)
-  , _class_vsm(NULL)
-{
-  initialize(lock, type);
-}
-
-ClassLoaderMetaspace::~ClassLoaderMetaspace() {
-  Metaspace::assert_not_frozen();
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
-  delete _vsm;
-  if (Metaspace::using_class_space()) {
-    delete _class_vsm;
-  }
-}
-
-void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
-  Metachunk* chunk = get_initialization_chunk(type, mdtype);
-  if (chunk != NULL) {
-    // Add to this manager's list of chunks in use and make it the current_chunk().
-    get_space_manager(mdtype)->add_chunk(chunk, true);
-  }
-}
-
-Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
-  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
-
-  // Get a chunk from the chunk freelist
-  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
-
-  if (chunk == NULL) {
-    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
-                                                  get_space_manager(mdtype)->medium_chunk_bunch());
-  }
-
-  return chunk;
-}
-
-void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
-  Metaspace::verify_global_initialization();
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
-
-  // Allocate SpaceManager for metadata objects.
-  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
-
-  if (Metaspace::using_class_space()) {
-    // Allocate SpaceManager for classes.
-    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
-  }
-
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-
-  // Allocate chunk for metadata objects
-  initialize_first_chunk(type, Metaspace::NonClassType);
-
-  // Allocate chunk for class metadata objects
-  if (Metaspace::using_class_space()) {
-    initialize_first_chunk(type, Metaspace::ClassType);
-  }
-}
-
-MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
-  Metaspace::assert_not_frozen();
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
-
-  // Don't use class_vsm() unless UseCompressedClassPointers is true.
-  if (Metaspace::is_class_space_allocation(mdtype)) {
-    return  class_vsm()->allocate(word_size);
-  } else {
-    return  vsm()->allocate(word_size);
-  }
+  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
 }
-
-MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
-  Metaspace::assert_not_frozen();
-  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
-  assert(delta_bytes > 0, "Must be");
-
-  size_t before = 0;
-  size_t after = 0;
-  bool can_retry = true;
-  MetaWord* res;
-  bool incremented;
-
-  // Each thread increments the HWM at most once. Even if the thread fails to increment
-  // the HWM, an allocation is still attempted. This is because another thread must then
-  // have incremented the HWM and therefore the allocation might still succeed.
-  do {
-    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
-    res = allocate(word_size, mdtype);
-  } while (!incremented && res == NULL && can_retry);
-
-  if (incremented) {
-    Metaspace::tracer()->report_gc_threshold(before, after,
-                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
-    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
-  }
-
-  return res;
-}
-
-size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
-  return (vsm()->used_words() +
-      (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
-}
-
-size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
-  return (vsm()->capacity_words() +
-      (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
-}
-
-void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
-  Metaspace::assert_not_frozen();
-  assert(!SafepointSynchronize::is_at_safepoint()
-         || Thread::current()->is_VM_thread(), "should be the VM thread");
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
-
-  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-
-  if (is_class && Metaspace::using_class_space()) {
-    class_vsm()->deallocate(ptr, word_size);
-  } else {
-    vsm()->deallocate(ptr, word_size);
-  }
-}
-
-size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
-  assert(Metaspace::using_class_space(), "Has to use class space");
-  return class_vsm()->calc_chunk_size(word_size);
-}
-
-void ClassLoaderMetaspace::print_on(outputStream* out) const {
-  // Print both class virtual space counts and metaspace.
-  if (Verbose) {
-    vsm()->print_on(out);
-    if (Metaspace::using_class_space()) {
-      class_vsm()->print_on(out);
-    }
-  }
-}
-
-void ClassLoaderMetaspace::verify() {
-  vsm()->verify();
-  if (Metaspace::using_class_space()) {
-    class_vsm()->verify();
-  }
-}
-
-void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
-  assert_lock_strong(lock());
-  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
-  if (Metaspace::using_class_space()) {
-    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
-  }
-}
-
-void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
-  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  add_to_statistics_locked(out);
-}
-
-/////////////// Unit tests ///////////////
-
-struct chunkmanager_statistics_t {
-  int num_specialized_chunks;
-  int num_small_chunks;
-  int num_medium_chunks;
-  int num_humongous_chunks;
-};
-
-extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
-  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
-  ChunkManagerStatistics stat;
-  chunk_manager->collect_statistics(&stat);
-  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
-  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
-  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
-  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
-}
-
-struct chunk_geometry_t {
-  size_t specialized_chunk_word_size;
-  size_t small_chunk_word_size;
-  size_t medium_chunk_word_size;
-};
-
-extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
-  if (mdType == Metaspace::NonClassType) {
-    out->specialized_chunk_word_size = SpecializedChunk;
-    out->small_chunk_word_size = SmallChunk;
-    out->medium_chunk_word_size = MediumChunk;
-  } else {
-    out->specialized_chunk_word_size = ClassSpecializedChunk;
-    out->small_chunk_word_size = ClassSmallChunk;
-    out->medium_chunk_word_size = ClassMediumChunk;
-  }
-}
--- a/src/hotspot/share/memory/metaspace.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -26,11 +26,12 @@
 
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/metaspaceChunkFreeListSummary.hpp"
 #include "memory/virtualspace.hpp"
-#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/exceptions.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // Metaspace
 //
@@ -58,23 +59,17 @@
 //
 
 class ClassLoaderData;
+class MetaspaceShared;
 class MetaspaceTracer;
-class Mutex;
 class outputStream;
 
-class CollectedHeap;
 
 namespace metaspace {
-  class ChunkManager;
-  class ClassLoaderMetaspaceStatistics;
-  class Metablock;
-  class Metachunk;
-  class PrintCLDMetaspaceInfoClosure;
-  class SpaceManager;
-  class VirtualSpaceList;
-  class VirtualSpaceNode;
+class MetaspaceSizesSnapshot;
 }
 
+////////////////// Metaspace ///////////////////////
+
 // Metaspaces each have a  SpaceManager and allocations
 // are done by the SpaceManager.  Allocations are done
 // out of the current Metachunk.  When the current Metachunk
@@ -94,74 +89,22 @@
 
   friend class MetaspaceShared;
 
- public:
-  enum MetadataType {
-    ClassType,
-    NonClassType,
-    MetadataTypeCount
-  };
-  enum MetaspaceType {
-    ZeroMetaspaceType = 0,
-    StandardMetaspaceType = ZeroMetaspaceType,
-    BootMetaspaceType = StandardMetaspaceType + 1,
-    UnsafeAnonymousMetaspaceType = BootMetaspaceType + 1,
-    ReflectionMetaspaceType = UnsafeAnonymousMetaspaceType + 1,
-    MetaspaceTypeCount
-  };
-
- private:
-
-  // Align up the word size to the allocation word size
-  static size_t align_word_size_up(size_t);
-
-  // Aligned size of the metaspace.
+  // Base and size of the compressed class space.
+  static MetaWord* _compressed_class_space_base;
   static size_t _compressed_class_space_size;
 
-  static size_t compressed_class_space_size() {
-    return _compressed_class_space_size;
-  }
-
-  static void set_compressed_class_space_size(size_t size) {
-    _compressed_class_space_size = size;
-  }
-
-  static size_t _first_chunk_word_size;
-  static size_t _first_class_chunk_word_size;
-
   static size_t _commit_alignment;
   static size_t _reserve_alignment;
   DEBUG_ONLY(static bool   _frozen;)
 
-  // Virtual Space lists for both classes and other metadata
-  static metaspace::VirtualSpaceList* _space_list;
-  static metaspace::VirtualSpaceList* _class_space_list;
-
-  static metaspace::ChunkManager* _chunk_manager_metadata;
-  static metaspace::ChunkManager* _chunk_manager_class;
-
   static const MetaspaceTracer* _tracer;
 
   static bool _initialized;
 
- public:
-  static metaspace::VirtualSpaceList* space_list()       { return _space_list; }
-  static metaspace::VirtualSpaceList* class_space_list() { return _class_space_list; }
-  static metaspace::VirtualSpaceList* get_space_list(MetadataType mdtype) {
-    assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
-    return mdtype == ClassType ? class_space_list() : space_list();
-  }
+  static MetaWord* compressed_class_space_base()              { return _compressed_class_space_base; }
+  static size_t compressed_class_space_size()                 { return _compressed_class_space_size; }
 
-  static metaspace::ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
-  static metaspace::ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
-  static metaspace::ChunkManager* get_chunk_manager(MetadataType mdtype) {
-    assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
-    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
-  }
-
-  // convenience function
-  static metaspace::ChunkManager* get_chunk_manager(bool is_class) {
-    return is_class ? chunk_manager_class() : chunk_manager_metadata();
-  }
+public:
 
   static const MetaspaceTracer* tracer() { return _tracer; }
   static void freeze() {
@@ -192,15 +135,13 @@
   static void global_initialize();
   static void post_initialize();
 
-  static void verify_global_initialization();
-
-  static size_t first_chunk_word_size() { return _first_chunk_word_size; }
-  static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
-
+  // The alignment at which Metaspace mappings are reserved.
   static size_t reserve_alignment()       { return _reserve_alignment; }
   static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
+
+  // The granularity at which Metaspace is committed and uncommitted.
   static size_t commit_alignment()        { return _commit_alignment; }
-  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
+  static size_t commit_words()            { return _commit_alignment / BytesPerWord; }
 
   static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
                             MetaspaceObj::Type type, TRAPS);
@@ -209,13 +150,10 @@
   static bool contains_non_shared(const void* ptr);
 
   // Free empty virtualspaces
-  static void purge(MetadataType mdtype);
   static void purge();
 
   static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
-                                   MetaspaceObj::Type type, MetadataType mdtype, TRAPS);
-
-  static const char* metadata_type_name(Metaspace::MetadataType mdtype);
+                                   MetaspaceObj::Type type, metaspace::MetadataType mdtype, TRAPS);
 
   static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0) NOT_LP64({});
 
@@ -224,216 +162,38 @@
     return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers);
   }
 
-  static bool is_class_space_allocation(MetadataType mdType) {
-    return mdType == ClassType && using_class_space();
-  }
-
   static bool initialized() { return _initialized; }
 
 };
 
-// Manages the metaspace portion belonging to a class loader
-class ClassLoaderMetaspace : public CHeapObj<mtClass> {
-  friend class CollectedHeap; // For expand_and_allocate()
-  friend class ZCollectedHeap; // For expand_and_allocate()
-  friend class ShenandoahHeap; // For expand_and_allocate()
-  friend class Metaspace;
-  friend class MetaspaceUtils;
-  friend class metaspace::PrintCLDMetaspaceInfoClosure;
-  friend class VM_CollectForMetadataAllocation; // For expand_and_allocate()
-
- private:
-
-  void initialize(Mutex* lock, Metaspace::MetaspaceType type);
-
-  // Initialize the first chunk for a Metaspace.  Used for
-  // special cases such as the boot class loader, reflection
-  // class loader and anonymous class loader.
-  void initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
-  metaspace::Metachunk* get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
-
-  const Metaspace::MetaspaceType _space_type;
-  Mutex* const  _lock;
-  metaspace::SpaceManager* _vsm;
-  metaspace::SpaceManager* _class_vsm;
-
-  metaspace::SpaceManager* vsm() const { return _vsm; }
-  metaspace::SpaceManager* class_vsm() const { return _class_vsm; }
-  metaspace::SpaceManager* get_space_manager(Metaspace::MetadataType mdtype) {
-    assert(mdtype != Metaspace::MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
-    return mdtype == Metaspace::ClassType ? class_vsm() : vsm();
-  }
-
-  Mutex* lock() const { return _lock; }
-
-  MetaWord* expand_and_allocate(size_t size, Metaspace::MetadataType mdtype);
-
-  size_t class_chunk_size(size_t word_size);
-
-  // Adds to the given statistic object. Must be locked with CLD metaspace lock.
-  void add_to_statistics_locked(metaspace::ClassLoaderMetaspaceStatistics* out) const;
-
-  Metaspace::MetaspaceType space_type() const { return _space_type; }
-
- public:
-
-  ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type);
-  ~ClassLoaderMetaspace();
-
-  // Allocate space for metadata of type mdtype. This is space
-  // within a Metachunk and is used by
-  //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
-  MetaWord* allocate(size_t word_size, Metaspace::MetadataType mdtype);
-
-  size_t allocated_blocks_bytes() const;
-  size_t allocated_chunks_bytes() const;
-
-  void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
-
-  void print_on(outputStream* st) const;
-  // Debugging support
-  void verify();
-
-  // Adds to the given statistic object. Will lock with CLD metaspace lock.
-  void add_to_statistics(metaspace::ClassLoaderMetaspaceStatistics* out) const;
-
-}; // ClassLoaderMetaspace
-
-class MetaspaceUtils : AllStatic {
-
-  // Spacemanager updates running counters.
-  friend class metaspace::SpaceManager;
-
-  // Special access for error reporting (checks without locks).
-  friend class oopDesc;
-  friend class Klass;
-
-  // Running counters for statistics concerning in-use chunks.
-  // Note: capacity = used + free + waste + overhead. Note that we do not
-  // count free and waste. Their sum can be deduces from the three other values.
-  // For more details, one should call print_report() from within a safe point.
-  static size_t _capacity_words [Metaspace:: MetadataTypeCount];
-  static size_t _overhead_words [Metaspace:: MetadataTypeCount];
-  static volatile size_t _used_words [Metaspace:: MetadataTypeCount];
-
-  // Atomically decrement or increment in-use statistic counters
-  static void dec_capacity(Metaspace::MetadataType mdtype, size_t words);
-  static void inc_capacity(Metaspace::MetadataType mdtype, size_t words);
-  static void dec_used(Metaspace::MetadataType mdtype, size_t words);
-  static void inc_used(Metaspace::MetadataType mdtype, size_t words);
-  static void dec_overhead(Metaspace::MetadataType mdtype, size_t words);
-  static void inc_overhead(Metaspace::MetadataType mdtype, size_t words);
-
-
-  // Getters for the in-use counters.
-  static size_t capacity_words(Metaspace::MetadataType mdtype)        { return _capacity_words[mdtype]; }
-  static size_t used_words(Metaspace::MetadataType mdtype)            { return _used_words[mdtype]; }
-  static size_t overhead_words(Metaspace::MetadataType mdtype)        { return _overhead_words[mdtype]; }
-
-  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
-
-  // Helper for print_xx_report.
-  static void print_vs(outputStream* out, size_t scale);
-
-public:
-
-  // Collect used metaspace statistics. This involves walking the CLDG. The resulting
-  // output will be the accumulated values for all live metaspaces.
-  // Note: method does not do any locking.
-  static void collect_statistics(metaspace::ClassLoaderMetaspaceStatistics* out);
-
-  // Used by MetaspaceCounters
-  static size_t free_chunks_total_words();
-  static size_t free_chunks_total_bytes();
-  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
-
-  static size_t capacity_words() {
-    return capacity_words(Metaspace::NonClassType) +
-           capacity_words(Metaspace::ClassType);
-  }
-  static size_t capacity_bytes(Metaspace::MetadataType mdtype) {
-    return capacity_words(mdtype) * BytesPerWord;
-  }
-  static size_t capacity_bytes() {
-    return capacity_words() * BytesPerWord;
-  }
-
-  static size_t used_words() {
-    return used_words(Metaspace::NonClassType) +
-           used_words(Metaspace::ClassType);
-  }
-  static size_t used_bytes(Metaspace::MetadataType mdtype) {
-    return used_words(mdtype) * BytesPerWord;
-  }
-  static size_t used_bytes() {
-    return used_words() * BytesPerWord;
-  }
-
-  // Space committed but yet unclaimed by any class loader.
-  static size_t free_in_vs_bytes();
-  static size_t free_in_vs_bytes(Metaspace::MetadataType mdtype);
-
-  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
-  static size_t reserved_bytes() {
-    return reserved_bytes(Metaspace::ClassType) +
-           reserved_bytes(Metaspace::NonClassType);
-  }
-
-  static size_t committed_bytes(Metaspace::MetadataType mdtype);
-  static size_t committed_bytes() {
-    return committed_bytes(Metaspace::ClassType) +
-           committed_bytes(Metaspace::NonClassType);
-  }
-
-  static size_t min_chunk_size_words();
-
-  // Flags for print_report().
-  enum ReportFlag {
-    // Show usage by class loader.
-    rf_show_loaders                 = (1 << 0),
-    // Breaks report down by chunk type (small, medium, ...).
-    rf_break_down_by_chunktype      = (1 << 1),
-    // Breaks report down by space type (anonymous, reflection, ...).
-    rf_break_down_by_spacetype      = (1 << 2),
-    // Print details about the underlying virtual spaces.
-    rf_show_vslist                  = (1 << 3),
-    // Print metaspace map.
-    rf_show_vsmap                   = (1 << 4),
-    // If show_loaders: show loaded classes for each loader.
-    rf_show_classes                 = (1 << 5)
-  };
-
-  // This will print out a basic metaspace usage report but
-  // unlike print_report() is guaranteed not to lock or to walk the CLDG.
-  static void print_basic_report(outputStream* st, size_t scale);
-
-  // Prints a report about the current metaspace state.
-  // Optional parts can be enabled via flags.
-  // Function will walk the CLDG and will lock the expand lock; if that is not
-  // convenient, use print_basic_report() instead.
-  static void print_report(outputStream* out, size_t scale = 0, int flags = 0);
-
-  static bool has_chunk_free_list(Metaspace::MetadataType mdtype);
-  static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype);
-
-  // Log change in used metadata.
-  static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values);
-  static void print_on(outputStream * out);
-
-  // Prints an ASCII representation of the given space.
-  static void print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype);
-
-  static void dump(outputStream* out);
-  static void verify_free_chunks();
-  // Check internal counters (capacity, used).
-  static void verify_metrics();
-};
+////////////////// MetaspaceGC ///////////////////////
 
 // Metaspace are deallocated when their class loader are GC'ed.
 // This class implements a policy for inducing GC's to recover
 // Metaspaces.
 
-class MetaspaceGC : AllStatic {
+class MetaspaceGCThresholdUpdater : public AllStatic {
+ public:
+  enum Type {
+    ComputeNewSize,
+    ExpandAndAllocate,
+    Last
+  };
+
+  static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) {
+    switch (updater) {
+      case ComputeNewSize:
+        return "compute_new_size";
+      case ExpandAndAllocate:
+        return "expand_and_allocate";
+      default:
+        assert(false, "Got bad updater: %d", (int) updater);
+        return NULL;
+    }
+  }
+};
+
+class MetaspaceGC : public AllStatic {
 
   // The current high-water-mark for inducing a GC.
   // When committed memory of all metaspaces reaches this value,
@@ -481,4 +241,64 @@
   static void compute_new_size();
 };
 
+
+
+class MetaspaceUtils : AllStatic {
+public:
+
+  // Committed space actually in use by Metadata
+  static size_t used_words();
+  static size_t used_words(metaspace::MetadataType mdtype);
+
+  // Space committed for Metaspace
+  static size_t committed_words();
+  static size_t committed_words(metaspace::MetadataType mdtype);
+
+  // Space reserved for Metaspace
+  static size_t reserved_words();
+  static size_t reserved_words(metaspace::MetadataType mdtype);
+
+  // _bytes() variants for convenience...
+  static size_t used_bytes()                                    { return used_words() * BytesPerWord; }
+  static size_t used_bytes(metaspace::MetadataType mdtype)      { return used_words(mdtype) * BytesPerWord; }
+  static size_t committed_bytes()                               { return committed_words() * BytesPerWord; }
+  static size_t committed_bytes(metaspace::MetadataType mdtype) { return committed_words(mdtype) * BytesPerWord; }
+  static size_t reserved_bytes()                                { return reserved_words() * BytesPerWord; }
+  static size_t reserved_bytes(metaspace::MetadataType mdtype)  { return reserved_words(mdtype) * BytesPerWord; }
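+  // (Usage sketch, illustrative only: a monitoring caller could sample
+  //    size_t committed = MetaspaceUtils::committed_bytes();
+  //  and report it alongside used_bytes() and reserved_bytes().)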
+
+  // TODO: do we really need this? The number is kind of uninformative.
+  static size_t capacity_bytes()                                { return 0; }
+  static size_t capacity_bytes(metaspace::MetadataType mdtype)  { return 0; }
+
+  // TODO: consolidate.
+  // Committed space in freelists
+  static size_t free_chunks_total_words(metaspace::MetadataType mdtype);
+
+  // TODO: implement or consolidate.
+  static MetaspaceChunkFreeListSummary chunk_free_list_summary(metaspace::MetadataType mdtype) {
+    return MetaspaceChunkFreeListSummary(0,0,0,0,0,0,0,0);
+  }
+
+  // Log change in used metadata.
+  static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values);
+
+  // Prints an ASCII representation of the given space.
+  static void print_metaspace_map(outputStream* out, metaspace::MetadataType mdtype);
+
+  // This will print out a basic metaspace usage report but
+  // unlike print_report() is guaranteed not to lock or to walk the CLDG.
+  static void print_basic_report(outputStream* st, size_t scale = 0);
+
+  // Prints a report about the current metaspace state.
+  // Function will walk the CLDG and will lock the expand lock; if that is not
+  // convenient, use print_basic_report() instead.
+  static void print_full_report(outputStream* out, size_t scale = 0);
+
+  static void print_on(outputStream * out);
+
+  DEBUG_ONLY(static void verify(bool slow);)
+
+};
+
 #endif // SHARE_MEMORY_METASPACE_HPP
--- a/src/hotspot/share/memory/metaspace/blockFreelist.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/blockFreelist.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -43,10 +43,11 @@
 }
 
 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
-  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
+  assert(word_size >= SmallBlocks::small_block_min_word_size(),
+         "attempting to return dark matter (" SIZE_FORMAT ").", word_size);
 
   Metablock* free_chunk = ::new (p) Metablock(word_size);
-  if (word_size < SmallBlocks::small_block_max_size()) {
+  if (word_size < SmallBlocks::small_block_max_word_size()) {
     small_blocks()->return_block(free_chunk, word_size);
   } else {
   dictionary()->return_chunk(free_chunk);
@@ -56,10 +57,10 @@
 }
 
 MetaWord* BlockFreelist::get_block(size_t word_size) {
-  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
+  assert(word_size >= SmallBlocks::small_block_min_word_size(), "never get dark matter");
 
   // Try small_blocks first.
-  if (word_size < SmallBlocks::small_block_max_size()) {
+  if (word_size < SmallBlocks::small_block_max_word_size()) {
     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
     // this space manager.
     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
@@ -89,7 +90,7 @@
   MetaWord* new_block = (MetaWord*)free_block;
   assert(block_size >= word_size, "Incorrect size of block from freelist");
   const size_t unused = block_size - word_size;
-  if (unused >= SmallBlocks::small_block_min_size()) {
+  if (unused >= SmallBlocks::small_block_min_word_size()) {
     return_block(new_block + word_size, unused);
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkAllocSequence.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "memory/metaspace/chunkAllocSequence.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+// A chunk allocation sequence which can be encoded with a simple const array.
+class ConstantChunkAllocSequence : public ChunkAllocSequence {
+
+  // Integer array specifying the chunk level allocation progression.
+  // Once the sequence is exhausted, the last entry is repeated endlessly.
+  const chklvl_t* const _entries;
+  const int _num_entries;
+
+public:
+
+  ConstantChunkAllocSequence(const chklvl_t* array, int num_entries)
+    : _entries(array)
+    , _num_entries(num_entries)
+  {
+    assert(_num_entries > 0, "must not be empty.");
+  }
+
+  chklvl_t get_next_chunk_level(int num_allocated) const {
+    if (num_allocated >= _num_entries) {
+      // Caller shall repeat last allocation
+      return _entries[_num_entries - 1];
+    }
+    return _entries[num_allocated];
+  }
+
+};
+
+// hard-coded chunk allocation sequences for various space types
+
+///////////////////////////
+// chunk allocation sequences for normal loaders:
+static const chklvl_t g_sequ_standard_nonclass[] = {
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_64K,
+    -1 // .. repeat last
+};
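+// (Read: a standard loader is handed four 4K non-class chunks, then 64K
+// chunks from there on; the trailing -1 is only an end marker.)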
+
+static const chklvl_t g_sequ_standard_class[] = {
+    chklvl::CHUNK_LEVEL_2K,
+    chklvl::CHUNK_LEVEL_2K,
+    chklvl::CHUNK_LEVEL_2K,
+    chklvl::CHUNK_LEVEL_2K,
+    chklvl::CHUNK_LEVEL_32K,
+    -1 // .. repeat last
+};
+
+///////////////////////////
+// chunk allocation sequences for reflection/anonymous loaders:
+// We allocate four smallish chunks before progressing to bigger chunks.
+static const chklvl_t g_sequ_anon_nonclass[] = {
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_4K,
+    -1 // .. repeat last
+};
+
+static const chklvl_t g_sequ_anon_class[] = {
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_4K,
+    -1 // .. repeat last
+};
+
+// Note: the trailing -1 in each sequence array is just an end marker, not a
+// real chunk level, so it is excluded from the entry count.
+#define DEFINE_CLASS_FOR_ARRAY(what) \
+  static ConstantChunkAllocSequence g_chunk_alloc_sequence_##what (g_sequ_##what, (sizeof(g_sequ_##what)/sizeof(chklvl_t)) - 1);
+
+DEFINE_CLASS_FOR_ARRAY(standard_nonclass)
+DEFINE_CLASS_FOR_ARRAY(standard_class)
+DEFINE_CLASS_FOR_ARRAY(anon_nonclass)
+DEFINE_CLASS_FOR_ARRAY(anon_class)
+
+
+class BootLoaderChunkAllocSequence : public ChunkAllocSequence {
+
+  // For now, this mirrors what the old code did
+  // (see SpaceManager::get_initial_chunk_size() and SpaceManager::calc_chunk_size).
+
+  // Not sure how much sense this still makes, especially with CDS - by default we
+  // now load JDK classes from CDS and therefore most of the boot loader
+  // chunks remain unoccupied.
+
+  // Also, InitialBootClassLoaderMetaspaceSize was/is confusing since it only applies
+  // to the non-class chunk.
+
+  const bool _is_class;
+
+  static chklvl_t calc_initial_chunk_level(bool is_class) {
+
+    size_t word_size = 0;
+    if (is_class) {
+      // In the old version, the first class space chunk for the boot loader was always medium class chunk size * 6.
+      word_size = (32 * K * 6) / BytesPerWord;
+
+    } else {
+      //assert(InitialBootClassLoaderMetaspaceSize < chklvl::MAX_CHUNK_BYTE_SIZE,
+      //       "InitialBootClassLoaderMetaspaceSize too large");
+      word_size = MIN2(InitialBootClassLoaderMetaspaceSize,
+                       chklvl::MAX_CHUNK_BYTE_SIZE) / BytesPerWord;
+    }
+    return chklvl::level_fitting_word_size(word_size);
+  }
+
+public:
+
+  BootLoaderChunkAllocSequence(bool is_class)
+    : _is_class(is_class)
+  {}
+
+  chklvl_t get_next_chunk_level(int num_allocated) const {
+    if (num_allocated == 0) {
+      return calc_initial_chunk_level(_is_class);
+    }
+    // A bit arbitrary, but this is what the old code did. Can be tweaked later if needed.
+    return chklvl::CHUNK_LEVEL_64K;
+  }
+
+};
+
+static BootLoaderChunkAllocSequence g_chunk_alloc_sequence_boot_non_class(false);
+static BootLoaderChunkAllocSequence g_chunk_alloc_sequence_boot_class(true);
+
+
+const ChunkAllocSequence* ChunkAllocSequence::alloc_sequence_by_space_type(MetaspaceType space_type, bool is_class) {
+
+  if (is_class) {
+    switch(space_type) {
+    case StandardMetaspaceType:          return &g_chunk_alloc_sequence_standard_class;
+    case ReflectionMetaspaceType:
+    case UnsafeAnonymousMetaspaceType:   return &g_chunk_alloc_sequence_anon_class;
+    case BootMetaspaceType:              return &g_chunk_alloc_sequence_boot_class;
+    default: ShouldNotReachHere();
+    }
+  } else {
+    switch(space_type) {
+    case StandardMetaspaceType:          return &g_chunk_alloc_sequence_standard_nonclass;
+    case ReflectionMetaspaceType:
+    case UnsafeAnonymousMetaspaceType:   return &g_chunk_alloc_sequence_anon_nonclass;
+    case BootMetaspaceType:              return &g_chunk_alloc_sequence_boot_non_class;
+    default: ShouldNotReachHere();
+    }
+  }
+
+  return NULL;
+
+}
+
+
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkAllocSequence.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
+#define SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
+
+#include "memory/metaspace.hpp" // For Metaspace::MetaspaceType
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+
+namespace metaspace {
+
+
+class ChunkAllocSequence {
+public:
+
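+  // Given the number of chunks a loader has already allocated, returns the
+  // chunk level (size class) the next chunk should have.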
+  virtual chklvl_t get_next_chunk_level(int num_allocated) const = 0;
+
+  // Given a space type, return the correct allocation sequence to use.
+  // The returned object is static and read only.
+  static const ChunkAllocSequence* alloc_sequence_by_space_type(MetaspaceType space_type, bool is_class);
+
+};
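+
+// Usage sketch (illustrative only; the real caller lives in the space manager
+// code):
+//   const ChunkAllocSequence* seq =
+//     ChunkAllocSequence::alloc_sequence_by_space_type(StandardMetaspaceType, false);
+//   chklvl_t lvl = seq->get_next_chunk_level(num_chunks_allocated);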
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkHeaderPool.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/metaspace/chunkHeaderPool.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+
+// The one global chunk header pool.
+ChunkHeaderPool ChunkHeaderPool::_chunkHeaderPool(false);
+
+
+ChunkHeaderPool::ChunkHeaderPool(bool delete_on_destruction)
+  : _num_slabs(), _first_slab(NULL), _current_slab(NULL), _delete_on_destruction(delete_on_destruction)
+{
+}
+
+ChunkHeaderPool::~ChunkHeaderPool() {
+  if (_delete_on_destruction) {
+    // Freeing the backing memory is only done for pools created by tests; the
+    // global chunk header pool is supposed to live until the process ends.
+    slab_t* s = _first_slab;
+    while (s != NULL) {
+      slab_t* next_slab = s->next;
+      os::free(s);
+      s = next_slab;
+    }
+  }
+}
+
+void ChunkHeaderPool::allocate_new_slab() {
+  slab_t* slab = (slab_t*)os::malloc(sizeof(slab_t), mtInternal);
+  assert(slab != NULL, "Failed to allocate chunk header slab");
+  memset(slab, 0, sizeof(slab_t));
+  if (_current_slab != NULL) {
+    _current_slab->next = slab;
+  }
+  _current_slab = slab;
+  if (_first_slab == NULL) {
+    _first_slab = slab;
+  }
+  _num_slabs.increment();
+}
+
+// Returns size of memory used.
+size_t ChunkHeaderPool::memory_footprint_words() const {
+  return (_num_slabs.get() * sizeof(slab_t)) / BytesPerWord;
+}
+
+#ifdef ASSERT
+void ChunkHeaderPool::verify(bool slow) const {
+  const slab_t* s = _first_slab;
+  int num = 0;
+  while (s != NULL) {
+    assert(s->top >= 0 && s->top <= slab_capacity,
+           "invalid slab at " PTR_FORMAT ", top: %d, slab cap: %d",
+           p2i(s), s->top, slab_capacity);
+    s = s->next;
+    num ++;
+  }
+  _num_slabs.check(num);
+}
+#endif
+
+} // namespace metaspace
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkHeaderPool.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CHUNKHEADERPOOL_HPP
+#define SHARE_MEMORY_METASPACE_CHUNKHEADERPOOL_HPP
+
+#include "memory/metaspace/internStat.hpp"
+#include "memory/metaspace/metachunk.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+// A simple helper class: a pool of unused chunk headers (Metachunk structures).
+
+class ChunkHeaderPool {
+
+  static const int slab_capacity = 128;
+
+  struct slab_t {
+    slab_t* next;
+    int top;
+    Metachunk elems[slab_capacity]; // fixed-size array of chunk headers
+  };
+
+  IntCounter _num_slabs;
+  slab_t* _first_slab;
+  slab_t* _current_slab;
+
+  IntCounter _num_handed_out;
+
+  MetachunkList _freelist;
+
+  const bool _delete_on_destruction;
+
+  void allocate_new_slab();
+
+  static ChunkHeaderPool _chunkHeaderPool;
+
+
+public:
+
+  ChunkHeaderPool(bool delete_on_destruction);
+
+  ~ChunkHeaderPool();
+
+  // Allocates a Metachunk structure. The structure is uninitialized.
+  Metachunk* allocate_chunk_header() {
+
+    Metachunk* c = NULL;
+
+    DEBUG_ONLY(verify(false));
+
+    c = _freelist.remove_first();
+    assert(c == NULL || c->is_dead(), "Not a freelist chunk header?");
+
+    if (c == NULL) {
+
+      if (_current_slab == NULL ||
+          _current_slab->top == slab_capacity) {
+        allocate_new_slab();
+        assert(_current_slab->top < slab_capacity, "Sanity");
+      }
+
+      c = _current_slab->elems + _current_slab->top;
+      _current_slab->top ++;
+
+    }
+
+    _num_handed_out.increment();
+
+    // By contract, the returned structure is uninitialized.
+    // Zap to make this clear.
+    DEBUG_ONLY(c->zap_header(0xBB);)
+
+    return c;
+
+  }
+
+  void return_chunk_header(Metachunk* c) {
+    // We only ever should return free chunks, since returning chunks
+    // happens only on merging and merging only works with free chunks.
+    assert(c != NULL && c->is_free(), "Sanity");
+#ifdef ASSERT
+    // In debug, fill dead header with pattern.
+    c->zap_header(0xCC);
+    c->set_next(NULL);
+    c->set_prev(NULL);
+#endif
+    c->set_dead();
+    _freelist.add(c);
+    _num_handed_out.decrement();
+
+  }
+
+  // Returns number of allocated elements.
+  int used() const                   { return _num_handed_out.get(); }
+
+  // Returns number of elements in free list.
+  int freelist_size() const          { return _freelist.size(); }
+
+  // Returns size of memory used.
+  size_t memory_footprint_words() const;
+
+  DEBUG_ONLY(void verify(bool slow) const;)
+
+  // Returns reference to the one global chunk header pool.
+  static ChunkHeaderPool& pool() { return _chunkHeaderPool; }
+
+};
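+
+// Illustrative usage sketch (not normative code):
+//
+//   Metachunk* c = ChunkHeaderPool::pool().allocate_chunk_header();
+//   // ... initialize the header, hand out and use the chunk ...
+//   // Later, e.g. when the header became superfluous after a buddy merge
+//   // (the chunk must be free at this point):
+//   ChunkHeaderPool::pool().return_chunk_header(c);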
+
+
+} // namespace metaspace
+
+
+
+
+#endif // SHARE_MEMORY_METASPACE_CHUNKHEADERPOOL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkLevel.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/settings.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+namespace metaspace {
+
+chklvl_t chklvl::level_fitting_word_size(size_t word_size) {
+
+  assert(chklvl::MAX_CHUNK_WORD_SIZE >= word_size,
+         "too large allocation size (" SIZE_FORMAT ")", word_size * BytesPerWord);
+
+  // TODO: This linear search could be replaced by a log2-based computation.
+  chklvl_t l = chklvl::HIGHEST_CHUNK_LEVEL;
+  while (l >= chklvl::LOWEST_CHUNK_LEVEL &&
+         word_size > chklvl::word_size_for_level(l)) {
+    l --;
+  }
+
+  return l;
+}
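+
+// Worked example (64-bit): for word_size == 1000 words (8000 bytes) the loop starts at
+// the smallest chunks (1K == 128 words) and walks toward larger chunks until the size
+// fits; it stops at the 8K level (1024 words) and returns CHUNK_LEVEL_8K.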
+
+void chklvl::print_chunk_size(outputStream* st, chklvl_t lvl) {
+  if (chklvl::is_valid_level(lvl)) {
+    const size_t s = chklvl::word_size_for_level(lvl) * BytesPerWord;
+    if (s < 1 * M) {
+      st->print("%3uk", (unsigned)(s / K));
+    } else {
+      st->print("%3um", (unsigned)(s / M));
+    }
+  } else {
+    st->print("?-?");
+  }
+
+}
+
+} // namespace metaspace
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/chunkLevel.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP
+#define SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+// Constants for the chunk levels and some utility functions.
+
+class outputStream;
+
+namespace metaspace {
+
+// Metachunk level (must be signed)
+typedef signed char chklvl_t;
+
+#define CHKLVL_FORMAT "lv%.2d"
+
+// Chunks are managed by a binary buddy allocator.
+
+// Chunk sizes range from 1K to 4MB (64bit).
+//
+// Reasoning: .... TODO explain
+
+// Each chunk has a level; the level corresponds to its position in the tree
+// and describes its size.
+//
+// The largest chunks are called root chunks; they are 4MB in size and have level 0.
+// From there on it goes:
+//
+// size    level
+// 4MB     0
+// 2MB     1
+// 1MB     2
+// 512K    3
+// 256K    4
+// 128K    5
+// 64K     6
+// 32K     7
+// 16K     8
+// 8K      9
+// 4K      10
+// 2K      11
+// 1K      12
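+//
+// In general: chunk_byte_size(level) == MAX_CHUNK_BYTE_SIZE >> level.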
+
+namespace chklvl {
+
+static const size_t   MAX_CHUNK_BYTE_SIZE    = 4 * M;
+static const int      NUM_CHUNK_LEVELS       = 13;
+static const size_t   MIN_CHUNK_BYTE_SIZE    = (MAX_CHUNK_BYTE_SIZE >> ((size_t)NUM_CHUNK_LEVELS - 1));
+
+static const size_t   MIN_CHUNK_WORD_SIZE    = MIN_CHUNK_BYTE_SIZE / sizeof(MetaWord);
+static const size_t   MAX_CHUNK_WORD_SIZE    = MAX_CHUNK_BYTE_SIZE / sizeof(MetaWord);
+
+static const chklvl_t ROOT_CHUNK_LEVEL       = 0;
+
+static const chklvl_t HIGHEST_CHUNK_LEVEL    = NUM_CHUNK_LEVELS - 1;
+static const chklvl_t LOWEST_CHUNK_LEVEL     = 0;
+
+static const chklvl_t INVALID_CHUNK_LEVEL    = (chklvl_t) -1;
+
+inline bool is_valid_level(chklvl_t level) {
+  return level >= LOWEST_CHUNK_LEVEL &&
+         level <= HIGHEST_CHUNK_LEVEL;
+}
+
+inline void check_valid_level(chklvl_t lvl) {
+  assert(is_valid_level(lvl), "invalid level (%d)", (int)lvl);
+}
+
+// Given a level return the chunk size, in words.
+inline size_t word_size_for_level(chklvl_t level) {
+  check_valid_level(level);
+  return (MAX_CHUNK_BYTE_SIZE >> level) / BytesPerWord;
+}
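+
+// Example: on 64-bit, word_size_for_level(CHUNK_LEVEL_64K) == (4M >> 6) / 8 == 8192 words,
+// i.e. a 64K chunk (CHUNK_LEVEL_64K is defined below as level 6).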
+
+// Given an arbitrary word size no larger than the largest chunk size,
+// return the highest chunk level whose chunks can still hold this size
+// (i.e. the level of the smallest fitting chunk).
+// Returns INVALID_CHUNK_LEVEL if no fitting level can be found (asserts in debug builds).
+chklvl_t level_fitting_word_size(size_t word_size);
+
+// Shorthands to refer to exact sizes
+static const chklvl_t CHUNK_LEVEL_4M =     ROOT_CHUNK_LEVEL;
+static const chklvl_t CHUNK_LEVEL_2M =    (ROOT_CHUNK_LEVEL + 1);
+static const chklvl_t CHUNK_LEVEL_1M =    (ROOT_CHUNK_LEVEL + 2);
+static const chklvl_t CHUNK_LEVEL_512K =  (ROOT_CHUNK_LEVEL + 3);
+static const chklvl_t CHUNK_LEVEL_256K =  (ROOT_CHUNK_LEVEL + 4);
+static const chklvl_t CHUNK_LEVEL_128K =  (ROOT_CHUNK_LEVEL + 5);
+static const chklvl_t CHUNK_LEVEL_64K =   (ROOT_CHUNK_LEVEL + 6);
+static const chklvl_t CHUNK_LEVEL_32K =   (ROOT_CHUNK_LEVEL + 7);
+static const chklvl_t CHUNK_LEVEL_16K =   (ROOT_CHUNK_LEVEL + 8);
+static const chklvl_t CHUNK_LEVEL_8K =    (ROOT_CHUNK_LEVEL + 9);
+static const chklvl_t CHUNK_LEVEL_4K =    (ROOT_CHUNK_LEVEL + 10);
+static const chklvl_t CHUNK_LEVEL_2K =    (ROOT_CHUNK_LEVEL + 11);
+static const chklvl_t CHUNK_LEVEL_1K =    (ROOT_CHUNK_LEVEL + 12);
+
+STATIC_ASSERT(CHUNK_LEVEL_1K == HIGHEST_CHUNK_LEVEL);
+
+/////////////////////////////////////////////////////////
+// print helpers
+void print_chunk_size(outputStream* st, chklvl_t lvl);
+
+
+} // namespace chklvl
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP
--- a/src/hotspot/share/memory/metaspace/chunkManager.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkManager.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,619 +22,464 @@
  * questions.
  *
  */
 #include "precompiled.hpp"
 
+
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/freeList.inline.hpp"
+#include "memory/metaspace/chunkAllocSequence.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
 #include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/internStat.hpp"
 #include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/metaDebug.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/settings.hpp"
-#include "memory/metaspace/occupancyMap.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/ostream.hpp"
 
 namespace metaspace {
 
-ChunkManager::ChunkManager(bool is_class)
-      : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
-  _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
-  _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
-  _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
-}
+
+// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
+void ChunkManager::return_chunk_simple(Metachunk* c) {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  DEBUG_ONLY(c->verify(false));
 
-void ChunkManager::remove_chunk(Metachunk* chunk) {
-  size_t word_size = chunk->word_size();
-  ChunkIndex index = list_index(word_size);
-  if (index != HumongousIndex) {
-    free_chunks(index)->remove_chunk(chunk);
-  } else {
-    humongous_dictionary()->remove_chunk(chunk);
-  }
+  _chunks.add(c);
+  c->reset_used_words();
 
-  // Chunk has been removed from the chunks free list, update counters.
-  account_for_removed_chunk(chunk);
+  // Tracing
+  log_debug(metaspace)("ChunkManager %s: returned chunk " METACHUNK_FORMAT ".",
+                       _name, METACHUNK_FORMAT_ARGS(c));
+
 }
 
-bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  assert(chunk != NULL, "invalid chunk pointer");
-  // Check for valid merge combinations.
-  assert((chunk->get_chunk_type() == SpecializedIndex &&
-          (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
-         (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
-        "Invalid chunk merge combination.");
+// Take a single chunk from the given freelist and adjust counters. Returns NULL
+// if there is no fitting chunk for this level.
+Metachunk* ChunkManager::remove_first_chunk_at_level(chklvl_t l) {
 
-  const size_t target_chunk_word_size =
-    get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
-
-  // [ prospective merge region )
-  MetaWord* const p_merge_region_start =
-    (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
-  MetaWord* const p_merge_region_end =
-    p_merge_region_start + target_chunk_word_size;
+  assert_lock_strong(MetaspaceExpand_lock);
+  DEBUG_ONLY(chklvl::check_valid_level(l);)
 
-  // We need the VirtualSpaceNode containing this chunk and its occupancy map.
-  VirtualSpaceNode* const vsn = chunk->container();
-  OccupancyMap* const ocmap = vsn->occupancy_map();
-
-  // The prospective chunk merge range must be completely contained by the
-  // committed range of the virtual space node.
-  if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
-    return false;
-  }
+  Metachunk* c = _chunks.remove_first(l);
 
-  // Only attempt to merge this range if at its start a chunk starts and at its end
-  // a chunk ends. If a chunk (can only be humongous) straddles either start or end
-  // of that range, we cannot merge.
-  if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
-    return false;
-  }
-  if (p_merge_region_end < vsn->top() &&
-      !ocmap->chunk_starts_at_address(p_merge_region_end)) {
-    return false;
-  }
-
-  // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
-  if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
-    return false;
+  // Tracing
+  if (c != NULL) {
+    log_debug(metaspace)("ChunkManager %s: removed chunk " METACHUNK_FORMAT ".",
+                         _name, METACHUNK_FORMAT_ARGS(c));
+  } else {
+    log_trace(metaspace)("ChunkManager %s: no chunk found for level " CHKLVL_FORMAT,
+                         _name, l);
   }
 
-  // Success! Remove all chunks in this region...
-  log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
-    (is_class() ? "class space" : "metaspace"),
-    p_merge_region_start, p_merge_region_end);
-
-  const int num_chunks_removed =
-    remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
-
-  // ... and create a single new bigger chunk.
-  Metachunk* const p_new_chunk =
-      ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
-  assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
-  p_new_chunk->set_origin(origin_merge);
-
-  log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
-    (is_class() ? "class space" : "metaspace"),
-    p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
-
-  // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
-  ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
-  ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
-
-  // Mark chunk as free. Note: it is not necessary to update the occupancy
-  // map in-use map, because the old chunks were also free, so nothing
-  // should have changed.
-  p_new_chunk->set_is_tagged_free(true);
-
-  // Add new chunk to its freelist.
-  ChunkList* const list = free_chunks(target_chunk_type);
-  list->return_chunk_at_head(p_new_chunk);
-
-  // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total
-  // should not have changed, because the size of the space should be the same)
-  _free_chunks_count -= num_chunks_removed;
-  _free_chunks_count ++;
-
-  // VirtualSpaceNode::chunk_count does not have to be modified:
-  // it means "number of active (non-free) chunks", so merging free chunks
-  // should not affect that count.
-
-  // At the end of a chunk merge, run verification tests.
-#ifdef ASSERT
-
-  EVERY_NTH(VerifyMetaspaceInterval)
-    locked_verify(true);
-    vsn->verify(true);
-  END_EVERY_NTH
-
-  g_internal_statistics.num_chunk_merges ++;
-
-#endif
-
-  return true;
+  return c;
 }
 
-// Remove all chunks in the given area - the chunks are supposed to be free -
-// from their corresponding freelists. Mark them as invalid.
-// - This does not correct the occupancy map.
-// - This does not adjust the counters in ChunkManager.
-// - Does not adjust container count counter in containing VirtualSpaceNode
-// Returns number of chunks removed.
-int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
-  assert(p != NULL && word_size > 0, "Invalid range.");
-  const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
-  assert_is_aligned(word_size, smallest_chunk_size);
-
-  Metachunk* const start = (Metachunk*) p;
-  const Metachunk* const end = (Metachunk*)(p + word_size);
-  Metachunk* cur = start;
-  int num_removed = 0;
-  while (cur < end) {
-    Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
-    DEBUG_ONLY(do_verify_chunk(cur));
-    assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
-    assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
-    log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
-      (is_class() ? "class space" : "metaspace"),
-      cur, cur->word_size() * sizeof(MetaWord));
-    cur->remove_sentinel();
-    // Note: cannot call ChunkManager::remove_chunk, because that
-    // modifies the counters in ChunkManager, which we do not want. So
-    // we call remove_chunk on the freelist directly (see also the
-    // splitting function which does the same).
-    ChunkList* const list = free_chunks(list_index(cur->word_size()));
-    list->remove_chunk(cur);
-    num_removed ++;
-    cur = next;
-  }
-  return num_removed;
+// Creates a chunk manager with a given name (which is for debug purposes only)
+// and an associated virtual space list, which is used to allocate new root chunks
+// when the freelists are exhausted (see get_chunk()).
+ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list)
+  : _vslist(space_list),
+    _name(name),
+    _chunks()
+{
 }
 
-// Update internal accounting after a chunk was added
-void ChunkManager::account_for_added_chunk(const Metachunk* c) {
+// Given a chunk we are about to hand out to the caller, make sure it is committed
+// according to Settings::committed_words_on_fresh_chunks().
+bool ChunkManager::commit_chunk_before_handout(Metachunk* c) {
   assert_lock_strong(MetaspaceExpand_lock);
-  _free_chunks_count ++;
-  _free_chunks_total += c->word_size();
-}
-
-// Update internal accounting after a chunk was removed
-void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  assert(_free_chunks_count >= 1,
-    "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
-  assert(_free_chunks_total >= c->word_size(),
-    "ChunkManager::_free_chunks_total: about to go negative"
-     "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
-  _free_chunks_count --;
-  _free_chunks_total -= c->word_size();
+  const size_t must_be_committed = MIN2(c->word_size(), Settings::committed_words_on_fresh_chunks());
+  return c->ensure_committed_locked(must_be_committed);
 }
 
-ChunkIndex ChunkManager::list_index(size_t size) {
-  return get_chunk_type_by_size(size, is_class());
-}
-
-size_t ChunkManager::size_by_index(ChunkIndex index) const {
-  index_bounds_check(index);
-  assert(index != HumongousIndex, "Do not call for humongous chunks.");
-  return get_size_for_nonhumongous_chunktype(index, is_class());
-}
-
-#ifdef ASSERT
-void ChunkManager::verify(bool slow) const {
-  MutexLocker cl(MetaspaceExpand_lock,
-                     Mutex::_no_safepoint_check_flag);
-  locked_verify(slow);
-}
-
-void ChunkManager::locked_verify(bool slow) const {
-  log_trace(gc, metaspace, freelist)("verifying %s chunkmanager (%s).",
-    (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
+// Given a chunk which must be outside of a freelist and must be free, split it to
+// meet a target level and return it. Splinters are added to the freelist.
+Metachunk* ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level) {
 
   assert_lock_strong(MetaspaceExpand_lock);
 
-  size_t chunks_counted = 0;
-  size_t wordsize_chunks_counted = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
-    const ChunkList* list = _free_chunks + i;
-    if (list != NULL) {
-      Metachunk* chunk = list->head();
-      while (chunk) {
-        if (slow) {
-          do_verify_chunk(chunk);
-        }
-        assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
-        chunks_counted ++;
-        wordsize_chunks_counted += chunk->size();
-        chunk = chunk->next();
+  assert(c->is_free() && c->level() < target_level, "Invalid chunk for splitting");
+  DEBUG_ONLY(chklvl::check_valid_level(target_level);)
+
+  DEBUG_ONLY(c->verify(true);)
+  DEBUG_ONLY(c->vsnode()->verify(true);)
+
+  // Chunk must be outside of our freelists
+  assert(_chunks.contains(c) == false, "Chunk is in freelist.");
+
+  log_debug(metaspace)("ChunkManager %s: will split chunk " METACHUNK_FORMAT " to " CHKLVL_FORMAT ".",
+                       _name, METACHUNK_FORMAT_ARGS(c), target_level);
+
+  const chklvl_t orig_level = c->level();
+  c = c->vsnode()->split(target_level, c, &_chunks);
+
+  // Splitting should never fail.
+  assert(c != NULL, "Split failed");
+  assert(c->level() == target_level, "Sanity");
+
+  DEBUG_ONLY(c->verify(false));
+
+  DEBUG_ONLY(verify_locked(true);)
+
+  DEBUG_ONLY(c->vsnode()->verify(true);)
+
+  return c;
+}
+
+// Get a chunk and be smart about it.
+// - 1) Attempt to find a free chunk of exactly the pref_level level.
+// - 2) Failing that, attempt to find a smaller free chunk (of a level between pref_level and max_level).
+// - 3) Failing that, attempt to find a larger free chunk and split it to pref_level.
+// - 4) Failing that, attempt to allocate a new root chunk from the connected virtual space.
+// - Failing that, give up and return NULL.
+// Note: this is not guaranteed to return a *committed* chunk. The chunk manager will
+//   attempt to commit the returned chunk according to Settings::committed_words_on_fresh_chunks();
+//   but this may fail if we hit a commit limit. In that case, a partly uncommitted chunk
+//   will be returned, and the commit is attempted again when we allocate from the chunk's
+//   uncommitted area. See also Metachunk::allocate.
+Metachunk* ChunkManager::get_chunk(chklvl_t max_level, chklvl_t pref_level) {
+
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+
+  DEBUG_ONLY(verify_locked(false);)
+
+  DEBUG_ONLY(chklvl::check_valid_level(max_level);)
+  DEBUG_ONLY(chklvl::check_valid_level(pref_level);)
+  assert(max_level >= pref_level, "invalid level.");
+
+  Metachunk* c = NULL;
+
+  // Tracing
+  log_debug(metaspace)("ChunkManager %s: get chunk: max " CHKLVL_FORMAT " (" SIZE_FORMAT "),"
+                       "preferred " CHKLVL_FORMAT " (" SIZE_FORMAT ").",
+                       _name, max_level, chklvl::word_size_for_level(max_level),
+                       pref_level, chklvl::word_size_for_level(pref_level));
+
+  // 1) Attempt to find a free chunk of exactly the pref_level level
+  c = remove_first_chunk_at_level(pref_level);
+
+  // Todo:
+  //
+  // We need to meditate about steps (2) and (3) a bit more.
+  // By simply preferring to reuse small chunks over splitting larger ones we may, strangely
+  // enough, increase fragmentation: callers wanting medium-sized chunks are handed small
+  // chunks instead, which takes those away from the many users preferring small chunks.
+  // Alternatives:
+  // - alternating between (2) (3) and (3) (2)
+  // - mixing (2) and (3) into one loop with a growing delta, and maybe a "hesitance" barrier function
+  // - keeping track of chunk demand and adding this into the equation: if e.g. 4K chunks were heavily
+  //   preferred in the past, maybe leave those alone for callers wanting larger chunks.
+  // - Take into account which chunks are committed? This would require some extra bookkeeping...
+
+  // 2) Failing that, attempt to find a smaller free chunk (of a level between pref_level and max_level).
+  if (c == NULL) {
+    for (chklvl_t lvl = pref_level + 1; lvl <= max_level; lvl ++) {
+      c = remove_first_chunk_at_level(lvl);
+      if (c != NULL) {
+        break;
+      }
+    }
+  }
+
+  // 3) Failing that, attempt to find a free chunk of larger size and split it.
+  if (c == NULL) {
+    for (chklvl_t lvl = pref_level - 1; lvl >= chklvl::ROOT_CHUNK_LEVEL; lvl --) {
+      c = remove_first_chunk_at_level(lvl);
+      if (c != NULL) {
+        // Split chunk; add splinters to freelist
+        c = split_chunk_and_add_splinters(c, pref_level);
+        break;
       }
     }
   }
 
-  chunks_counted += humongous_dictionary()->total_free_blocks();
-  wordsize_chunks_counted += humongous_dictionary()->total_size();
-
-  assert(chunks_counted == _free_chunks_count && wordsize_chunks_counted == _free_chunks_total,
-         "freelist accounting mismatch: "
-         "we think: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words, "
-         "reality: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words.",
-         _free_chunks_count, _free_chunks_total,
-         chunks_counted, wordsize_chunks_counted);
-}
-#endif // ASSERT
-
-void ChunkManager::locked_print_free_chunks(outputStream* st) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
-                _free_chunks_total, _free_chunks_count);
-}
+  // 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
+  if (c == NULL) {
 
-ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
-  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
-         "Bad index: %d", (int)index);
-  return &_free_chunks[index];
-}
-
-ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
-  ChunkIndex index = list_index(word_size);
-  assert(index < HumongousIndex, "No humongous list");
-  return free_chunks(index);
-}
-
-// Helper for chunk splitting: given a target chunk size and a larger free chunk,
-// split up the larger chunk into n smaller chunks, at least one of which should be
-// the target chunk of target chunk size. The smaller chunks, including the target
-// chunk, are returned to the freelist. The pointer to the target chunk is returned.
-// Note that this chunk is supposed to be removed from the freelist right away.
-Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
-  assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
-
-  const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
-  const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
-
-  MetaWord* const region_start = (MetaWord*)larger_chunk;
-  const size_t region_word_len = larger_chunk->word_size();
-  MetaWord* const region_end = region_start + region_word_len;
-  VirtualSpaceNode* const vsn = larger_chunk->container();
-  OccupancyMap* const ocmap = vsn->occupancy_map();
+    // Tracing
+    log_debug(metaspace)("ChunkManager %s: need new root chunk.", _name);
 
-  // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
-  // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
-  // at an address suitable to place the smaller target chunk.
-  assert_is_aligned(region_start, target_chunk_word_size);
-
-  // Remove old chunk.
-  free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
-  larger_chunk->remove_sentinel();
-
-  // Prevent access to the old chunk from here on.
-  larger_chunk = NULL;
-  // ... and wipe it.
-  DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
-
-  // In its place create first the target chunk...
-  MetaWord* p = region_start;
-  Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
-  assert(target_chunk == (Metachunk*)p, "Sanity");
-  target_chunk->set_origin(origin_split);
-
-  // Note: we do not need to mark its start in the occupancy map
-  // because it coincides with the old chunk start.
+    c = _vslist->allocate_root_chunk();
 
-  // Mark chunk as free and return to the freelist.
-  do_update_in_use_info_for_chunk(target_chunk, false);
-  free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
-
-  // This chunk should now be valid and can be verified.
-  DEBUG_ONLY(do_verify_chunk(target_chunk));
-
-  // In the remaining space create the remainder chunks.
-  p += target_chunk->word_size();
-  assert(p < region_end, "Sanity");
-
-  while (p < region_end) {
-
-    // Find the largest chunk size which fits the alignment requirements at address p.
-    ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
-    size_t this_chunk_word_size = 0;
-    for(;;) {
-      this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
-      if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
-        break;
-      } else {
-        this_chunk_index = prev_chunk_index(this_chunk_index);
-        assert(this_chunk_index >= target_chunk_index, "Sanity");
-      }
+    // This may have failed if the virtual space list is exhausted and cannot
+    // be expanded by a new node (which is the case for the class space list).
+    if (c == NULL) {
+      return NULL;
     }
 
-    assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
-    assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
-    assert(p + this_chunk_word_size <= region_end, "Sanity");
+    assert(c->level() == chklvl::LOWEST_CHUNK_LEVEL, "Not a root chunk?");
+
+    // Split this root chunk to the desired chunk size.
+    if (pref_level > c->level()) {
+      c = split_chunk_and_add_splinters(c, pref_level);
+    }
+  }
+
+  // At this point we should always have a chunk. If we hit a commit limit in the
+  // meantime, the chunk may still be uncommitted, but the chunk itself exists now.
+  assert(c != NULL, "Unexpected");
 
-    // Create splitting chunk.
-    Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
-    assert(this_chunk == (Metachunk*)p, "Sanity");
-    this_chunk->set_origin(origin_split);
-    ocmap->set_chunk_starts_at_address(p, true);
-    do_update_in_use_info_for_chunk(this_chunk, false);
+  // Before returning the chunk, attempt to commit it according to the handout rules.
+  // If that fails, we ignore the error and return the uncommitted chunk.
+  if (commit_chunk_before_handout(c) == false) {
+    log_info(gc, metaspace)("Failed to commit chunk prior to handout.");
+  }
+
+  // Any chunk returned from ChunkManager shall be marked as in use.
+  c->set_in_use();
+
+  DEBUG_ONLY(verify_locked(false);)
+
+  log_debug(metaspace)("ChunkManager %s: handing out chunk " METACHUNK_FORMAT ".",
+                       _name, METACHUNK_FORMAT_ARGS(c));
+
+
+  DEBUG_ONLY(InternalStats::inc_num_chunks_taken_from_freelist();)
+
+  return c;
+
+} // ChunkManager::get_chunk
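+
+// Illustrative example: a caller preferring a 64K chunk but willing to accept anything
+// down to 4K would call get_chunk(chklvl::CHUNK_LEVEL_4K, chklvl::CHUNK_LEVEL_64K)
+// (remember: the higher the level, the smaller the chunk).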
+
 
-    // This chunk should be valid and can be verified.
-    DEBUG_ONLY(do_verify_chunk(this_chunk));
+// Return a single chunk to the ChunkManager and adjust accounting. May merge the
+// chunk with its free neighbors.
+// The chunk must already have been removed from its former list (see asserts below).
+// Happens after a Classloader was unloaded and releases its metaspace chunks.
+// !! Note: this may invalidate the chunk. Do not access the chunk after
+//    this function returns !!
+void ChunkManager::return_chunk(Metachunk* c) {
 
-    // Return this chunk to freelist and correct counter.
-    free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
-    _free_chunks_count ++;
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+
+  log_debug(metaspace)("ChunkManager %s: returning chunk " METACHUNK_FORMAT ".",
+                       _name, METACHUNK_FORMAT_ARGS(c));
+
+  DEBUG_ONLY(verify_locked(false);)
+  DEBUG_ONLY(c->verify(false);)
+
+  assert(!_chunks.contains(c), "A chunk to be added to the freelist must not be in the freelist already.");
 
-    log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
-      SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
-      p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
-      p2i(region_start), p2i(region_end));
+  assert(c->is_in_use(), "Unexpected chunk state");
+  assert(!c->in_list(), "Remove from list first");
+  c->set_free();
+  c->reset_used_words();
+
+  const chklvl_t orig_lvl = c->level();
+
+  Metachunk* merged = NULL;
+  if (!c->is_root_chunk()) {
+    // Only attempt merging if we are not of the lowest level already.
+    merged = c->vsnode()->merge(c, &_chunks);
+  }
 
-    p += this_chunk_word_size;
+  if (merged != NULL) {
+
+    DEBUG_ONLY(merged->verify(false));
+
+    // We merged our chunk into a neighboring chunk and now have a bigger chunk.
+    assert(merged->level() < orig_lvl, "Sanity");
+
+    log_trace(metaspace)("ChunkManager %s: merged into chunk " METACHUNK_FORMAT ".",
+                         _name, METACHUNK_FORMAT_ARGS(merged));
+
+    c = merged;
 
   }
 
-  // Note: at this point, the VirtualSpaceNode is invalid since we split a chunk and
-  // did not yet hand out part of that split; so, vsn->verify_free_chunks_are_ideally_merged()
-  // would assert. Instead, do all verifications in the caller.
-
-  DEBUG_ONLY(g_internal_statistics.num_chunk_splits ++);
-
-  return target_chunk;
-}
-
-Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  Metachunk* chunk = NULL;
-  bool we_did_split_a_chunk = false;
-
-  if (list_index(word_size) != HumongousIndex) {
-
-    ChunkList* free_list = find_free_chunks_list(word_size);
-    assert(free_list != NULL, "Sanity check");
-
-    chunk = free_list->head();
-
-    if (chunk == NULL) {
-      // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
-      // This is the counterpart of the coalescing-upon-chunk-return.
-
-      ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
-
-      // Is there a larger chunk we could split?
-      Metachunk* larger_chunk = NULL;
-      ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
-      while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
-        larger_chunk = free_chunks(larger_chunk_index)->head();
-        if (larger_chunk == NULL) {
-          larger_chunk_index = next_chunk_index(larger_chunk_index);
-        }
-      }
-
-      if (larger_chunk != NULL) {
-        assert(larger_chunk->word_size() > word_size, "Sanity");
-        assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
-
-        // We found a larger chunk. Lets split it up:
-        // - remove old chunk
-        // - in its place, create new smaller chunks, with at least one chunk
-        //   being of target size, the others sized as large as possible. This
-        //   is to make sure the resulting chunks are "as coalesced as possible"
-        //   (similar to VirtualSpaceNode::retire()).
-        // Note: during this operation both ChunkManager and VirtualSpaceNode
-        //  are temporarily invalid, so be careful with asserts.
-
-        log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
-           ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
-          (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
-          chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
-
-        chunk = split_chunk(word_size, larger_chunk);
-
-        // This should have worked.
-        assert(chunk != NULL, "Sanity");
-        assert(chunk->word_size() == word_size, "Sanity");
-        assert(chunk->is_tagged_free(), "Sanity");
-
-        we_did_split_a_chunk = true;
-
-      }
-    }
-
-    if (chunk == NULL) {
-      return NULL;
-    }
-
-    // Remove the chunk as the head of the list.
-    free_list->remove_chunk(chunk);
-
-    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
-                                       p2i(free_list), free_list->count());
-
-  } else {
-    chunk = humongous_dictionary()->get_chunk(word_size);
-
-    if (chunk == NULL) {
-      return NULL;
-    }
-
-    log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
-                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
+  if (Settings::uncommit_on_return() &&
+      Settings::uncommit_on_return_min_word_size() <= c->word_size())
+  {
+    log_trace(metaspace)("ChunkManager %s: uncommitting free chunk " METACHUNK_FORMAT ".",
+                         _name, METACHUNK_FORMAT_ARGS(c));
+    c->uncommit_locked();
   }
 
-  // Chunk has been removed from the chunk manager; update counters.
-  account_for_removed_chunk(chunk);
-  do_update_in_use_info_for_chunk(chunk, true);
-  chunk->container()->inc_container_count();
-  chunk->inc_use_count();
+  return_chunk_simple(c);
 
-  // Remove it from the links to this freelist
-  chunk->set_next(NULL);
-  chunk->set_prev(NULL);
+  DEBUG_ONLY(verify_locked(false);)
+  DEBUG_ONLY(c->vsnode()->verify(true);)
 
-  // Run some verifications (some more if we did a chunk split)
-#ifdef ASSERT
+  DEBUG_ONLY(InternalStats::inc_num_chunks_returned_to_freelist();)
+
+}
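+
+// Merge example: when a 4K chunk is returned and its 4K buddy happens to be free too,
+// both fold into one free 8K chunk (level decreases by one); this repeats as long as
+// the buddy of the resulting chunk is also free, up to a full 4MB root chunk.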
 
-  EVERY_NTH(VerifyMetaspaceInterval)
-    // Be extra verify-y when chunk split happened.
-    locked_verify(true);
-    VirtualSpaceNode* const vsn = chunk->container();
-    vsn->verify(true);
-    if (we_did_split_a_chunk) {
-      vsn->verify_free_chunks_are_ideally_merged();
-    }
-  END_EVERY_NTH
-
-  g_internal_statistics.num_chunks_removed_from_freelist ++;
-
-#endif
-
-  return chunk;
+// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+// enlarge it in place by claiming its trailing buddy.
+//
+// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+//
+// If successful, the follower chunk will be removed from the freelists, the leader chunk c will
+// double in size (level decreased by one).
+//
+// On success, true is returned, false otherwise.
+bool ChunkManager::attempt_enlarge_chunk(Metachunk* c) {
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  return c->vsnode()->attempt_enlarge_chunk(c, &_chunks);
 }
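+
+// Enlarge example: an in-use 8K chunk which is the leader of its 16K buddy pair, and
+// whose 8K follower is free, can be enlarged in place to a 16K chunk; the follower is
+// removed from the freelist and the leader's level decreases by one.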
 
-Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  // Take from the beginning of the list
-  Metachunk* chunk = free_chunks_get(word_size);
-  if (chunk == NULL) {
-    return NULL;
+static void print_word_size_delta(outputStream* st, size_t word_size_1, size_t word_size_2) {
+  if (word_size_1 == word_size_2) {
+    print_scaled_words(st, word_size_1);
+    st->print (" (no change)");
+  } else {
+    print_scaled_words(st, word_size_1);
+    st->print("->");
+    print_scaled_words(st, word_size_2);
+    st->print(" (");
+    if (word_size_2 <= word_size_1) {
+      st->print("-");
+      print_scaled_words(st, word_size_1 - word_size_2);
+    } else {
+      st->print("+");
+      print_scaled_words(st, word_size_2 - word_size_1);
+    }
+    st->print(")");
   }
+}
 
-  assert((word_size <= chunk->word_size()) ||
-         (list_index(chunk->word_size()) == HumongousIndex),
-         "Non-humongous variable sized chunk");
-  LogTarget(Trace, gc, metaspace, freelist) lt;
-  if (lt.is_enabled()) {
-    size_t list_count;
-    if (list_index(word_size) < HumongousIndex) {
-      ChunkList* list = find_free_chunks_list(word_size);
-      list_count = list->count();
-    } else {
-      list_count = humongous_dictionary()->total_count();
-    }
-    LogStream ls(lt);
-    ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
-             p2i(this), p2i(chunk), chunk->word_size(), list_count);
-    ResourceMark rm;
-    locked_print_free_chunks(&ls);
+// Attempt to reclaim free areas in metaspace wholesale:
+// - first, attempt to purge nodes of the backing virtual space list. This can only
+//   succeed for nodes which contain nothing but free chunks, so it highly depends on
+//   fragmentation.
+// - then, uncommit areas of free chunks according to the rules laid down in
+//   Settings (see settings.hpp).
+void ChunkManager::wholesale_reclaim() {
+
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+
+  log_info(metaspace)("ChunkManager \"%s\": reclaiming memory...", _name);
+
+  const size_t reserved_before = _vslist->reserved_words();
+  const size_t committed_before = _vslist->committed_words();
+  int num_nodes_purged = 0;
+
+  if (Settings::delete_nodes_on_purge()) {
+    num_nodes_purged = _vslist->purge(&_chunks);
+    DEBUG_ONLY(InternalStats::inc_num_purges();)
   }
 
-  return chunk;
-}
-
-void ChunkManager::return_single_chunk(Metachunk* chunk) {
-
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    this->locked_verify(false);
-    do_verify_chunk(chunk);
-  END_EVERY_NTH
-#endif
+  if (Settings::uncommit_on_purge()) {
+    const chklvl_t max_level =
+        chklvl::level_fitting_word_size(Settings::uncommit_on_purge_min_word_size());
+    for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL;
+         l <= max_level;
+         l ++)
+    {
+      for (Metachunk* c = _chunks.first_at_level(l); c != NULL; c = c->next()) {
+        c->uncommit_locked();
+      }
+    }
+    DEBUG_ONLY(InternalStats::inc_num_wholesale_uncommits();)
+  }
 
-  const ChunkIndex index = chunk->get_chunk_type();
-  assert_lock_strong(MetaspaceExpand_lock);
-  DEBUG_ONLY(g_internal_statistics.num_chunks_added_to_freelist ++;)
-  assert(chunk != NULL, "Expected chunk.");
-  assert(chunk->container() != NULL, "Container should have been set.");
-  assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
-  index_bounds_check(index);
-
-  // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
-  // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
-  // keeps tree node pointers in the chunk payload area which mangle will overwrite.
-  DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
-
-  // may need node for verification later after chunk may have been merged away.
-  DEBUG_ONLY(VirtualSpaceNode* vsn = chunk->container(); )
+  const size_t reserved_after = _vslist->reserved_words();
+  const size_t committed_after = _vslist->committed_words();
 
-  if (index != HumongousIndex) {
-    // Return non-humongous chunk to freelist.
-    ChunkList* list = free_chunks(index);
-    assert(list->size() == chunk->word_size(), "Wrong chunk type.");
-    list->return_chunk_at_head(chunk);
-    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
-        chunk_size_name(index), p2i(chunk));
+  // Print a nice report.
+  if (reserved_after == reserved_before && committed_after == committed_before) {
+    log_info(metaspace)("ChunkManager %s: ... nothing reclaimed.", _name);
   } else {
-    // Return humongous chunk to dictionary.
-    assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
-    assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
-           "Humongous chunk has wrong alignment.");
-    _humongous_dictionary.return_chunk(chunk);
-    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
-        chunk_size_name(index), p2i(chunk), chunk->word_size());
-  }
-  chunk->container()->dec_container_count();
-  do_update_in_use_info_for_chunk(chunk, false);
+    LogTarget(Info, metaspace) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      ls.print_cr("ChunkManager %s: finished reclaiming memory: ", _name);
 
-  // Chunk has been added; update counters.
-  account_for_added_chunk(chunk);
+      ls.print("reserved: ");
+      print_word_size_delta(&ls, reserved_before, reserved_after);
+      ls.cr();
 
-  // Attempt coalesce returned chunks with its neighboring chunks:
-  // if this chunk is small or special, attempt to coalesce to a medium chunk.
-  if (index == SmallIndex || index == SpecializedIndex) {
-    if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
-      // This did not work. But if this chunk is special, we still may form a small chunk?
-      if (index == SpecializedIndex) {
-        if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
-          // give up.
-        }
-      }
+      ls.print("committed: ");
+      print_word_size_delta(&ls, committed_before, committed_after);
+      ls.cr();
+
+      ls.print_cr("full nodes purged: %d", num_nodes_purged);
     }
   }
 
-  // From here on do not access chunk anymore, it may have been merged with another chunk.
+  DEBUG_ONLY(_vslist->verify_locked(true));
+  DEBUG_ONLY(verify_locked(true));
+
+}
+
+
+ChunkManager* ChunkManager::_chunkmanager_class = NULL;
+ChunkManager* ChunkManager::_chunkmanager_nonclass = NULL;
+
+void ChunkManager::set_chunkmanager_class(ChunkManager* cm) {
+  assert(_chunkmanager_class == NULL, "Sanity");
+  _chunkmanager_class = cm;
+}
 
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    this->locked_verify(true);
-    vsn->verify(true);
-    vsn->verify_free_chunks_are_ideally_merged();
-  END_EVERY_NTH
-#endif
+void ChunkManager::set_chunkmanager_nonclass(ChunkManager* cm) {
+  assert(_chunkmanager_nonclass == NULL, "Sanity");
+  _chunkmanager_nonclass = cm;
+}
+
+
+
+// Update statistics.
+void ChunkManager::add_to_statistics(cm_stats_t* out) const {
+
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+
+  for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    out->num_chunks[l] += _chunks.num_chunks_at_level(l);
+    out->committed_word_size[l] += _chunks.committed_word_size_at_level(l);
+  }
+
+  DEBUG_ONLY(out->verify();)
 
 }
 
-void ChunkManager::return_chunk_list(Metachunk* chunks) {
-  if (chunks == NULL) {
-    return;
-  }
-  LogTarget(Trace, gc, metaspace, freelist) log;
-  if (log.is_enabled()) { // tracing
-    log.print("returning list of chunks...");
-  }
-  unsigned num_chunks_returned = 0;
-  size_t size_chunks_returned = 0;
-  Metachunk* cur = chunks;
-  while (cur != NULL) {
-    // Capture the next link before it is changed
-    // by the call to return_chunk_at_head();
-    Metachunk* next = cur->next();
-    if (log.is_enabled()) { // tracing
-      num_chunks_returned ++;
-      size_chunks_returned += cur->word_size();
-    }
-    return_single_chunk(cur);
-    cur = next;
-  }
-  if (log.is_enabled()) { // tracing
-    log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".",
-        num_chunks_returned, size_chunks_returned);
-  }
+#ifdef ASSERT
+
+void ChunkManager::verify(bool slow) const {
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  verify_locked(slow);
 }
 
-void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
+void ChunkManager::verify_locked(bool slow) const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  assert(_vslist != NULL, "No vslist");
+
+  // This checks that the lists are wired up correctly, that the counters are valid and
+  // that each chunk is (only) in its correct list.
+  _chunks.verify(true);
+
+  // Also check that each chunk in the freelists is actually free and has no used words.
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    for (const Metachunk* c = _chunks.first_at_level(l); c != NULL; c = c->next()) {
+      assert(c->is_free(), "Chunk is not free.");
+      assert(c->used_words() == 0, "Chunk should have not used words.");
+    }
   }
+
+}
+
+#endif // ASSERT
+
+void ChunkManager::print_on(outputStream* st) const {
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  print_on_locked(st);
+}
+
+void ChunkManager::print_on_locked(outputStream* st) const {
+  assert_lock_strong(MetaspaceExpand_lock);
+  st->print_cr("cm %s: %d chunks, total word size: " SIZE_FORMAT ", committed word size: " SIZE_FORMAT, _name,
+               total_num_chunks(), total_word_size(), _chunks.total_committed_word_size());
+  _chunks.print_on(st);
 }
 
 } // namespace metaspace
--- a/src/hotspot/share/memory/metaspace/chunkManager.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/chunkManager.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,173 +27,138 @@
 #define SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP
 
 #include "memory/allocation.hpp"
-#include "memory/binaryTreeDictionary.hpp"
-#include "memory/freeList.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/counter.hpp"
 #include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/metaspaceStatistics.hpp"
-#include "memory/metaspaceChunkFreeListSummary.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class ChunkManagerTestAccessor;
 
 namespace metaspace {
 
-typedef class FreeList<Metachunk> ChunkList;
-typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
-
-// Manages the global free lists of chunks.
-class ChunkManager : public CHeapObj<mtInternal> {
-  friend class ::ChunkManagerTestAccessor;
+class VirtualSpaceList;
+struct cm_stats_t;
 
-  // Free list of chunks of different sizes.
-  //   SpecializedChunk
-  //   SmallChunk
-  //   MediumChunk
-  ChunkList _free_chunks[NumberOfFreeLists];
+// class ChunkManager
+//
+// The ChunkManager has a central role. Callers request chunks from it.
+// It keeps the freelists for chunks. If the freelist is exhausted it
+// allocates new chunks from a connected VirtualSpaceList.
+//
+class ChunkManager : public CHeapObj<mtInternal> {
 
-  // Whether or not this is the class chunkmanager.
-  const bool _is_class;
+  // A chunk manager is connected to a virtual space list which is used
+  // to allocate new root chunks when no free chunks are found.
+  VirtualSpaceList* const _vslist;
 
-  // Return non-humongous chunk list by its index.
-  ChunkList* free_chunks(ChunkIndex index);
+  // Name
+  const char* const _name;
 
-  // Returns non-humongous chunk list for the given chunk word size.
-  ChunkList* find_free_chunks_list(size_t word_size);
-
-  //   HumongousChunk
-  ChunkTreeDictionary _humongous_dictionary;
+  // Freelist
+  MetachunkListCluster _chunks;
 
-  // Returns the humongous chunk dictionary.
-  ChunkTreeDictionary* humongous_dictionary() { return &_humongous_dictionary; }
-  const ChunkTreeDictionary* humongous_dictionary() const { return &_humongous_dictionary; }
+  // Returns true if this manager contains the given chunk. Slow (walks free list) and
+  // only needed for verifications.
+  DEBUG_ONLY(bool contains_chunk(Metachunk* c) const;)
 
-  // Size, in metaspace words, of all chunks managed by this ChunkManager
-  size_t _free_chunks_total;
-  // Number of chunks in this ChunkManager
-  size_t _free_chunks_count;
+  // Given a chunk we are about to hand out to the caller, make sure it is committed
+  // according to constants::committed_words_on_fresh_chunks.
+  // May fail if we hit the commit limit.
+  static bool commit_chunk_before_handout(Metachunk* c);
 
-  // Update counters after a chunk was added or removed removed.
-  void account_for_added_chunk(const Metachunk* c);
-  void account_for_removed_chunk(const Metachunk* c);
-
-  // Given a pointer to a chunk, attempts to merge it with neighboring
-  // free chunks to form a bigger chunk. Returns true if successful.
-  bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
+  // Take a single chunk from the given freelist and adjust counters. Returns NULL
+  // if there is no fitting chunk for this level.
+  Metachunk* remove_first_chunk_at_level(chklvl_t l);
 
-  // Helper for chunk merging:
-  //  Given an address range with 1-n chunks which are all supposed to be
-  //  free and hence currently managed by this ChunkManager, remove them
-  //  from this ChunkManager and mark them as invalid.
-  // - This does not correct the occupancy map.
-  // - This does not adjust the counters in ChunkManager.
-  // - Does not adjust container count counter in containing VirtualSpaceNode.
-  // Returns number of chunks removed.
-  int remove_chunks_in_area(MetaWord* p, size_t word_size);
+  // Given a chunk which must be outside of a freelist and must be free, split it to
+  // meet a target level and return it. Splinters are added to the freelist.
+  Metachunk* split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level);
+
+  // Uncommit all free chunks at or below the given level.
+  void uncommit_free_chunks(chklvl_t max_level);
+
+public:
 
-  // Helper for chunk splitting: given a target chunk size and a larger free chunk,
-  // split up the larger chunk into n smaller chunks, at least one of which should be
-  // the target chunk of target chunk size. The smaller chunks, including the target
-  // chunk, are returned to the freelist. The pointer to the target chunk is returned.
-  // Note that this chunk is supposed to be removed from the freelist right away.
-  Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
-
- public:
-
-  ChunkManager(bool is_class);
+  // Creates a chunk manager with a given name (for debug purposes only)
+  // and an associated virtual space list from which new chunks will be
+  // requested if the freelists are exhausted (see get_chunk()).
+  ChunkManager(const char* name, VirtualSpaceList* space_list);
 
-  // Add or delete (return) a chunk to the global freelist.
-  Metachunk* chunk_freelist_allocate(size_t word_size);
-
-  // Map a size to a list index assuming that there are lists
-  // for special, small, medium, and humongous chunks.
-  ChunkIndex list_index(size_t size);
-
-  // Map a given index to the chunk size.
-  size_t size_by_index(ChunkIndex index) const;
-
-  bool is_class() const { return _is_class; }
+  // Get a chunk and be smart about it.
+  // - 1) Attempt to find a free chunk of exactly the preferred level (pref_level).
+  // - 2) Failing that, attempt to find a smaller free chunk, down to the maximal level (max_level).
+  // - 3) Failing that, attempt to find a free chunk of larger size and split it.
+  // - 4) Failing that, attempt to allocate a new chunk from the connected virtual space list.
+  // - Failing that, give up and return NULL.
+  // Note: this is not guaranteed to return a *committed* chunk. The chunk manager will
+  //   attempt to commit the returned chunk according to constants::committed_words_on_fresh_chunks;
+  //   but this may fail if we hit a commit limit. In that case, a partly uncommitted chunk
+  //   will be returned, and the commit is attempted again when we allocate from the chunk's
+  //   uncommitted area. See also Metachunk::allocate.
+  Metachunk* get_chunk(chklvl_t max_level, chklvl_t pref_level);
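+  //
+  // Illustrative caller sketch (the level values are made up for the example):
+  //
+  //   Metachunk* c = cm->get_chunk(/* max_level */ 7, /* pref_level */ 6);
+  //   if (c == NULL) {
+  //     // Neither the freelists nor the connected virtual space list could
+  //     // satisfy the request (e.g. a commit limit was hit).
+  //   }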
 
-  // Convenience accessors.
-  size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
-  size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
-  size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
+  // Return a single chunk to the ChunkManager and adjust accounting. May merge chunk
+  //  with neighbors.
+  // Typically happens after a class loader was unloaded and its metaspace chunks are released.
+  // !! Notes:
+  //    1) After this method returns, c may no longer be valid. Do not access the chunk after this function returns.
+  //    2) This function will not remove c from its current chunk list. This has to be done by the caller
+  //       prior to calling this method.
+  void return_chunk(Metachunk* c);
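+
+  // Illustrative caller sketch ("my_list" stands for whatever list currently owns c; see note 2):
+  //
+  //   my_list.remove(c);    // detach c from its current chunk list first
+  //   cm->return_chunk(c);  // c must not be touched after this call (note 1)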
 
-  // Take a chunk from the ChunkManager. The chunk is expected to be in
-  // the chunk manager (the freelist if non-humongous, the dictionary if
-  // humongous).
-  void remove_chunk(Metachunk* chunk);
-
-  // Return a single chunk of type index to the ChunkManager.
-  void return_single_chunk(Metachunk* chunk);
-
-  // Add the simple linked list of chunks to the freelist of chunks
-  // of type index.
-  void return_chunk_list(Metachunk* chunk);
+  // Return a single chunk to the freelist and adjust accounting. No merge is attempted.
+  void return_chunk_simple(Metachunk* c);
 
-  // Total of the space in the free chunks list
-  size_t free_chunks_total_words() const { return _free_chunks_total; }
-  size_t free_chunks_total_bytes() const { return free_chunks_total_words() * BytesPerWord; }
-
-  // Number of chunks in the free chunks list
-  size_t free_chunks_count() const { return _free_chunks_count; }
-
-  // Remove from a list by size.  Selects list based on size of chunk.
-  Metachunk* free_chunks_get(size_t chunk_word_size);
+  // Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+  // enlarge it in place by claiming its trailing buddy.
+  //
+  // This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+  //
+  // If successful, the follower chunk will be removed from the freelists and the leader chunk c
+  // will double in size (its level decreased by one).
+  //
+  // Returns true on success, false otherwise.
+  bool attempt_enlarge_chunk(Metachunk* c);
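+
+  // Illustration (schematic; the buddy is c's trailing neighbor of equal size):
+  //
+  //   before:  | c (in use, leader) | buddy (free)       |
+  //   after:   | c (in use, level decreased by one)      |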
 
-#define index_bounds_check(index)                                         \
-  assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
-
-  size_t num_free_chunks(ChunkIndex index) const {
-    index_bounds_check(index);
-
-    if (index == HumongousIndex) {
-      return _humongous_dictionary.total_free_blocks();
-    }
+  // Attempt to reclaim free areas in metaspace wholesale:
+  // - first, attempt to purge nodes of the backing virtual space list. This can only
+  //   succeed for nodes which contain nothing but free chunks, so it highly depends on fragmentation.
+  // - then, uncommit areas of free chunks according to the rules laid down in
+  //   settings (see settings.hpp).
+  void wholesale_reclaim();
 
-    ssize_t count = _free_chunks[index].count();
-    return count == -1 ? 0 : (size_t) count;
-  }
+  // Run verifications. slow=true: verify chunk-internal integrity too.
+  DEBUG_ONLY(void verify(bool slow) const;)
+  DEBUG_ONLY(void verify_locked(bool slow) const;)
 
-  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
-    index_bounds_check(index);
+  // Returns the name of this chunk manager.
+  const char* name() const                  { return _name; }
 
-    size_t word_size = 0;
-    if (index == HumongousIndex) {
-      word_size = _humongous_dictionary.total_size();
-    } else {
-      const size_t size_per_chunk_in_words = _free_chunks[index].size();
-      word_size = size_per_chunk_in_words * num_free_chunks(index);
-    }
+  // Returns total number of chunks
+  int total_num_chunks() const              { return _chunks.total_num_chunks(); }
 
-    return word_size * BytesPerWord;
-  }
+  // Returns number of words in all free chunks.
+  size_t total_word_size() const            { return _chunks.total_word_size(); }
+
+  // Update statistics.
+  void add_to_statistics(cm_stats_t* out) const;
 
-  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
-    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
-                                         num_free_chunks(SmallIndex),
-                                         num_free_chunks(MediumIndex),
-                                         num_free_chunks(HumongousIndex),
-                                         size_free_chunks_in_bytes(SpecializedIndex),
-                                         size_free_chunks_in_bytes(SmallIndex),
-                                         size_free_chunks_in_bytes(MediumIndex),
-                                         size_free_chunks_in_bytes(HumongousIndex));
-  }
+  void print_on(outputStream* st) const;
+  void print_on_locked(outputStream* st) const;
+
+private:
+
+  static ChunkManager* _chunkmanager_class;
+  static ChunkManager* _chunkmanager_nonclass;
 
-#ifdef ASSERT
-  // Debug support
-  // Verify free list integrity. slow=true: verify chunk-internal integrity too.
-  void verify(bool slow) const;
-  void locked_verify(bool slow) const;
-#endif
+public:
 
-  void locked_print_free_chunks(outputStream* st);
+  static ChunkManager* chunkmanager_class() { return _chunkmanager_class; }
+  static ChunkManager* chunkmanager_nonclass() { return _chunkmanager_nonclass; }
 
-  // Fill in current statistic values to the given statistics object.
-  void collect_statistics(ChunkManagerStatistics* out) const;
+  static void set_chunkmanager_class(ChunkManager* cm);
+  static void set_chunkmanager_nonclass(ChunkManager* cm);
+
 };
 
 } // namespace metaspace
 
-
 #endif // SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "logging/log.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/metaspaceTracer.hpp"
+#include "memory/metaspace/chunkAllocSequence.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/internStat.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/runningCounters.hpp"
+#include "memory/metaspace/settings.hpp"
+#include "memory/metaspace/spaceManager.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+
+namespace metaspace {
+
+static bool use_class_space(bool is_class) {
+  return Metaspace::using_class_space() && is_class;
+}
+
+static bool use_class_space(MetadataType mdType) {
+  return use_class_space(is_class(mdType));
+}
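+
+// Example: with a compressed class space (Metaspace::using_class_space() == true),
+// allocations of class metadata (is_class(mdType) == true) are routed to the class
+// space manager below; otherwise everything is served by the non-class space manager.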
+
+ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, MetaspaceType space_type)
+  : _lock(lock)
+  , _space_type(space_type)
+  , _non_class_space_manager(NULL)
+  , _class_space_manager(NULL)
+{
+  ChunkManager* const non_class_cm =
+          ChunkManager::chunkmanager_nonclass();
+
+  // Initialize non-class spacemanager
+  _non_class_space_manager = new SpaceManager(
+      non_class_cm,
+      ChunkAllocSequence::alloc_sequence_by_space_type(space_type, false),
+      lock,
+      RunningCounters::used_nonclass_counter(),
+      "non-class sm");
+
+  // If needed, initialize class spacemanager
+  if (Metaspace::using_class_space()) {
+    ChunkManager* const class_cm =
+            ChunkManager::chunkmanager_class();
+    _class_space_manager = new SpaceManager(
+        class_cm,
+        ChunkAllocSequence::alloc_sequence_by_space_type(space_type, true),
+        lock,
+        RunningCounters::used_class_counter(),
+        "class sm");
+  }
+
+#ifdef ASSERT
+  InternalStats::inc_num_metaspace_births();
+  if (_space_type == metaspace::UnsafeAnonymousMetaspaceType) {
+    InternalStats::inc_num_anon_cld_births();
+  }
+#endif
+}
+
+ClassLoaderMetaspace::~ClassLoaderMetaspace() {
+  Metaspace::assert_not_frozen();
+
+  delete _non_class_space_manager;
+  delete _class_space_manager;
+
+#ifdef ASSERT
+  InternalStats::inc_num_metaspace_deaths();
+  if (_space_type == metaspace::UnsafeAnonymousMetaspaceType) {
+    InternalStats::inc_num_anon_cld_deaths();
+  }
+#endif
+
+}
+
+// Allocate word_size words from Metaspace.
+MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, MetadataType mdType) {
+  Metaspace::assert_not_frozen();
+  if (use_class_space(mdType)) {
+    return class_space_manager()->allocate(word_size);
+  } else {
+    return non_class_space_manager()->allocate(word_size);
+  }
+}
+
+// Attempt to expand the GC threshold to be good for at least another word_size words
+// and allocate. Returns NULL on failure. Used during Metaspace GC.
+MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, MetadataType mdType) {
+  Metaspace::assert_not_frozen();
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+  assert(delta_bytes > 0, "Must be");
+
+  size_t before = 0;
+  size_t after = 0;
+  bool can_retry = true;
+  MetaWord* res;
+  bool incremented;
+
+  // Each thread increments the HWM at most once. Even if the thread fails to increment
+  // the HWM, an allocation is still attempted. This is because another thread must then
+  // have incremented the HWM and therefore the allocation might still succeed.
+  do {
+    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
+    res = allocate(word_size, mdType);
+  } while (!incremented && res == NULL && can_retry);
+
+  if (incremented) {
+    Metaspace::tracer()->report_gc_threshold(before, after,
+                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
+    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
+  }
+
+  return res;
+}
+
+// Prematurely returns a metaspace allocation to the _block_freelists
+// because it is not needed anymore.
+void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
+
+  Metaspace::assert_not_frozen();
+
+  if (use_class_space(is_class)) {
+    class_space_manager()->deallocate(ptr, word_size);
+  } else {
+    non_class_space_manager()->deallocate(ptr, word_size);
+  }
+
+  DEBUG_ONLY(InternalStats::inc_num_deallocs();)
+
+}
+
+// Update statistics. This walks all in-use chunks.
+void ClassLoaderMetaspace::add_to_statistics(clms_stats_t* out) const {
+  if (non_class_space_manager() != NULL) {
+    non_class_space_manager()->add_to_statistics(&out->sm_stats_nonclass);
+  }
+  if (class_space_manager() != NULL) {
+    class_space_manager()->add_to_statistics(&out->sm_stats_class);
+  }
+}
+
+#ifdef ASSERT
+void ClassLoaderMetaspace::verify() const {
+  check_valid_spacetype(_space_type);
+  if (non_class_space_manager() != NULL) {
+    non_class_space_manager()->verify();
+  }
+  if (class_space_manager() != NULL) {
+    class_space_manager()->verify();
+  }
+}
+#endif // ASSERT
+
+} // end namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CLASSLOADERMETASPACE_HPP
+#define SHARE_MEMORY_METASPACE_CLASSLOADERMETASPACE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+
+class Mutex;
+
+namespace metaspace {
+
+class SpaceManager;
+struct clms_stats_t;
+
+class ClassLoaderMetaspace : public CHeapObj<mtClass> {
+
+  // The CLD lock.
+  Mutex* const _lock;
+
+  const MetaspaceType _space_type;
+
+  SpaceManager* _non_class_space_manager;
+  SpaceManager* _class_space_manager;
+
+  Mutex* lock() const                            { return _lock; }
+  SpaceManager* non_class_space_manager() const  { return _non_class_space_manager; }
+  SpaceManager* class_space_manager() const      { return _class_space_manager; }
+
+  SpaceManager* get_space_manager(bool is_class) {
+    return is_class ? class_space_manager() : non_class_space_manager();
+  }
+
+  // Returns true if this class loader is of a type which will only ever load one class.
+  bool is_micro() const {
+    return _space_type == metaspace::UnsafeAnonymousMetaspaceType ||
+           _space_type == metaspace::ReflectionMetaspaceType;
+  }
+
+public:
+
+  ClassLoaderMetaspace(Mutex* lock, MetaspaceType space_type);
+
+  ~ClassLoaderMetaspace();
+
+  MetaspaceType space_type() const { return _space_type; }
+
+  // Allocate word_size words from Metaspace.
+  MetaWord* allocate(size_t word_size, MetadataType mdType);
+
+  // Attempt to expand the GC threshold to be good for at least another word_size words
+  // and allocate. Returns NULL on failure. Used during Metaspace GC.
+  MetaWord* expand_and_allocate(size_t word_size, MetadataType mdType);
+
+  // Prematurely returns a metaspace allocation to the _block_freelists
+  // because it is not needed anymore.
+  void deallocate(MetaWord* ptr, size_t word_size, bool is_class);
+
+  // Update statistics. This walks all in-use chunks.
+  void add_to_statistics(clms_stats_t* out) const;
+
+  DEBUG_ONLY(void verify() const;)
+
+  // TODO
+  size_t allocated_blocks_bytes() const { return 0; }
+  size_t allocated_chunks_bytes() const { return 0; }
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_CLASSLOADERMETASPACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/commitLimiter.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+// Returns the size, in words, by which we may expand the metaspace committed area without
+// - if _cap == 0: hitting the GC threshold or MaxMetaspaceSize,
+// - if _cap > 0: hitting the cap (testing only).
+size_t CommitLimiter::possible_expansion_words() const {
+
+  if (_cap > 0) { // Testing.
+    assert(_cnt.get() <= _cap, "Beyond limit?");
+    return _cap - _cnt.get();
+  }
+
+  assert(_cnt.get() * BytesPerWord <= MaxMetaspaceSize, "Beyond limit?");
+  const size_t words_left_below_max = MaxMetaspaceSize / BytesPerWord - _cnt.get();
+
+  const size_t words_left_below_gc_threshold = MetaspaceGC::allowed_expansion();
+
+  return MIN2(words_left_below_max, words_left_below_gc_threshold);
+
+}
+
+static CommitLimiter g_global_limiter(0);
+
+// Returns the global commit limiter.
+CommitLimiter* CommitLimiter::globalLimiter() {
+  return &g_global_limiter;
+}
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/commitLimiter.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP
+#define SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+
+namespace metaspace {
+
+class CommitLimiter : public CHeapObj<mtInternal> {
+
+  // Counts total words committed for metaspace
+  SizeCounter _cnt;
+
+  // Purely for testing purposes: cap, in words.
+  const size_t _cap;
+
+public:
+
+  // Create a commit limiter. This is only useful for testing, with a cap != 0,
+  // since normal code should use the global commit limiter.
+  // If cap != 0 (word size), the cap replaces the internal logic of limiting.
+  CommitLimiter(size_t cap = 0) : _cnt(), _cap(cap) {}
+
+  // Returns the size, in words, by which we may expand the metaspace committed area without
+  // - if _cap == 0: hitting the GC threshold or MaxMetaspaceSize,
+  // - if _cap > 0: hitting the cap (testing only).
+  size_t possible_expansion_words() const;
+
+  void increase_committed(size_t word_size)   { _cnt.increment_by(word_size); }
+  void decrease_committed(size_t word_size)   { _cnt.decrement_by(word_size); }
+  size_t committed_words() const              { return _cnt.get(); }
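+
+  // Illustrative test sketch (the cap value is made up; cap and counter are in words):
+  //
+  //   CommitLimiter limiter(1024);                   // capped at 1024 words
+  //   limiter.increase_committed(256);
+  //   assert(limiter.possible_expansion_words() == 768, "Sanity");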
+
+  // Returns the global commit limiter.
+  static CommitLimiter* globalLimiter();
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/commitMask.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#include "precompiled.hpp"
+
+#include "memory/metaspace/commitMask.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/settings.hpp"
+#include "runtime/stubRoutines.hpp"
+
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+namespace metaspace {
+
+CommitMask::CommitMask(const MetaWord* start, size_t word_size)
+  : CHeapBitMap(mask_size(word_size, Settings::commit_granule_words()))
+  , _base(start)
+  , _word_size(word_size)
+  , _words_per_bit(Settings::commit_granule_words())
+{
+  assert(_word_size > 0 && _words_per_bit > 0 &&
+         is_aligned(_word_size, _words_per_bit), "Sanity");
+}
+
+#ifdef ASSERT
+
+static const bool TEST_UNCOMMITTED_REGION = false;
+
+volatile u1 x;
+
+void CommitMask::verify(bool slow, bool do_touch_test) const {
+
+  // Walk the whole commit mask.
+  // For each 1 bit, check if the associated granule is accessible.
+  // For each 0 bit, check if the associated granule is not accessible. Slow mode only.
+
+  assert_is_aligned(_base, _words_per_bit * BytesPerWord);
+  assert_is_aligned(_word_size, _words_per_bit);
+
+  if (do_touch_test) {
+    for (idx_t i = 0; i < size(); i ++) {
+      const MetaWord* const p = _base + (i * _words_per_bit);
+      if (at(i)) {
+        // Should be accessible. Just touch it.
+        x ^= *(u1*)p;
+      } else {
+        // Should not be accessible.
+        if (slow) {
+          // Note: results may differ between platforms. On Linux, this should hold since
+          // we uncommit memory by setting protection to PROT_NONE. We may have to check
+          // whether this works as expected on other platforms.
+          if (TEST_UNCOMMITTED_REGION && CanUseSafeFetch32()) {
+            assert(os::is_readable_pointer(p) == false,
+                   "index %u, pointer " PTR_FORMAT ", should not be accessible.",
+                   (unsigned)i, p2i(p));
+          }
+        }
+      }
+    }
+  }
+
+}
+
+#endif // ASSERT
+
+void CommitMask::print_on(outputStream* st) const {
+
+  st->print("commit mask, base " PTR_FORMAT ":", p2i(base()));
+
+  for (idx_t i = 0; i < size(); i ++) {
+    st->print("%c", at(i) ? 'X' : '-');
+  }
+
+  st->cr();
+
+}
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/commitMask.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_COMMITMASK_HPP
+#define SHARE_MEMORY_METASPACE_COMMITMASK_HPP
+
+#include "utilities/debug.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class outputStream;
+
+namespace metaspace {
+
+// A bitmap covering a range of metaspace; each bit in this mask corresponds to one
+// commit granule of that range and indicates whether the granule is committed.
+//
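+// Example (granule size illustrative): with _words_per_bit == 1024, bit N covers
+// the words [_base + N * 1024, _base + (N + 1) * 1024); a set bit means that
+// granule is committed.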
+class CommitMask : public CHeapBitMap {
+
+  const MetaWord* const _base;
+  const size_t _word_size;
+  const size_t _words_per_bit;
+
+  // Given an offset, in words, into the area, return the number of the bit
+  // covering it.
+  static idx_t bitno_for_word_offset(size_t offset, size_t words_per_bit) {
+    return offset / words_per_bit;
+  }
+
+  idx_t bitno_for_address(const MetaWord* p) const {
+    // Note: we allow one-beyond since this is a typical need.
+    assert(p >= _base && p <= _base + _word_size, "Invalid address");
+    const size_t off = p - _base;
+    return bitno_for_word_offset(off, _words_per_bit);
+  }
+
+  static idx_t mask_size(size_t word_size, size_t words_per_bit) {
+    return bitno_for_word_offset(word_size, words_per_bit);
+  }
+
+  struct BitCounterClosure : public BitMapClosure {
+    idx_t cnt;
+    bool do_bit(BitMap::idx_t offset) { cnt ++; return true; }
+  };
+
+  // Missing from BitMap.
+  // Count 1 bits in range [start, end).
+  idx_t count_one_bits_in_range(idx_t start, idx_t end) const {
+    assert(start < end, "Zero range");
+    // TODO: This can be done more efficiently.
+    BitCounterClosure bcc;
+    bcc.cnt = 0;
+    iterate(&bcc, start, end);
+    return bcc.cnt;
+  }
+
+#ifdef ASSERT
+  // Given a pointer, check if it points into the range this bitmap covers.
+  bool is_pointer_valid(const MetaWord* p) const {
+    return p >= _base && p < _base + _word_size;
+  }
+
+  // Given a pointer, check if it points into the range this bitmap covers.
+  void check_pointer(const MetaWord* p) const {
+    assert(is_pointer_valid(p),
+           "Pointer " PTR_FORMAT " not in range of this bitmap [" PTR_FORMAT ", " PTR_FORMAT ").",
+           p2i(p), p2i(_base), p2i(_base + _word_size));
+  }
+  // Given a pointer, check if it points into the range this bitmap covers,
+  // and if it is aligned to commit granule border.
+  void check_pointer_aligned(const MetaWord* p) const {
+    check_pointer(p);
+    assert(is_aligned(p, _words_per_bit * BytesPerWord),
+           "Pointer " PTR_FORMAT " should be aligned to commit granule size " SIZE_FORMAT ".",
+           p2i(p), _words_per_bit * BytesPerWord);
+  }
+  // Given a range, check if it points into the range this bitmap covers,
+  // and if its borders are aligned to commit granule border.
+  void check_range(const MetaWord* start, size_t word_size) const {
+    check_pointer_aligned(start);
+    assert(is_aligned(word_size, _words_per_bit),
+           "Range " SIZE_FORMAT " should be aligned to commit granule size " SIZE_FORMAT ".",
+           word_size, _words_per_bit);
+    check_pointer(start + word_size - 1);
+  }
+#endif
+
+  // Marks a single commit granule as committed (value == true)
+  // or uncommitted (value == false) and returns
+  // its prior state.
+  bool mark_granule(idx_t bitno, bool value) {
+    bool b = at(bitno);
+    at_put(bitno, value);
+    return b;
+  }
+
+public:
+
+  CommitMask(const MetaWord* start, size_t word_size);
+
+  const MetaWord* base() const  { return _base; }
+  size_t word_size() const      { return _word_size; }
+  const MetaWord* end() const   { return _base + word_size(); }
+
+  // Given an address, returns true if the address is committed, false if not.
+  bool is_committed_address(const MetaWord* p) const {
+    DEBUG_ONLY(check_pointer(p));
+    const idx_t bitno = bitno_for_address(p);
+    return at(bitno);
+  }
+
+  // Given an address range, return size, in number of words, of committed area within that range.
+  size_t get_committed_size_in_range(const MetaWord* start, size_t word_size) const {
+    DEBUG_ONLY(check_range(start, word_size));
+    assert(word_size > 0, "zero range");
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    const idx_t num_bits = count_one_bits_in_range(b1, b2);
+    return num_bits * _words_per_bit;
+  }
+
+  // Return total committed size, in number of words.
+  size_t get_committed_size() const {
+    return count_one_bits() * _words_per_bit;
+  }
+
+  // Mark a whole address range [start, end) as committed.
+  // Return the number of words which had already been committed before this operation.
+  size_t mark_range_as_committed(const MetaWord* start, size_t word_size) {
+    DEBUG_ONLY(check_range(start, word_size));
+    assert(word_size > 0, "zero range");
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    if (b1 == b2) { // Simple case, 1 granule
+      bool was_committed = mark_granule(b1, true);
+      return was_committed ? _words_per_bit : 0;
+    }
+    const idx_t one_bits_in_range_before = count_one_bits_in_range(b1, b2);
+    set_range(b1, b2);
+    return one_bits_in_range_before * _words_per_bit;
+  }
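+
+  // Example (numbers illustrative): with _words_per_bit == 512, marking a
+  // 2048-word range (4 granules) of which one granule was already committed
+  // returns 512 (the number of words that were committed before the call).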
+
+  // Mark a whole address range [start, end) as uncommitted.
+  // Return the number of words which had already been uncommitted before this operation.
+  size_t mark_range_as_uncommitted(const MetaWord* start, size_t word_size) {
+    DEBUG_ONLY(check_range(start, word_size));
+    assert(word_size > 0, "zero range");
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    if (b1 == b2) { // Simple case, 1 granule
+      bool was_committed = mark_granule(b1, false);
+      return was_committed ? 0 : _words_per_bit;
+    }
+    const idx_t zero_bits_in_range_before =
+        (b2 - b1) - count_one_bits_in_range(b1, b2);
+    clear_range(b1, b2);
+    return zero_bits_in_range_before * _words_per_bit;
+  }
+
+
+  //// Debug stuff ////
+  DEBUG_ONLY(void verify(bool slow, bool do_touch_test = true) const;)
+
+  void print_on(outputStream* st) const;
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_COMMITMASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/counter.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_COUNTER_HPP
+#define SHARE_MEMORY_METASPACE_COUNTER_HPP
+
+#include "metaprogramming/isSigned.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+
+namespace metaspace {
+
+// A very simple helper class which counts something, offers decrement/increment
+// methods and checks for overflow/underflow on increment/decrement.
+//
+// (since we seem to do that a lot....)
+
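+// Usage sketch (illustrative):
+//
+//   SizeCounter c("used-words");          // SizeCounter == AbstractCounter<size_t>, see below
+//   c.increment_by(128);
+//   c.decrement_by(28);
+//   assert(c.get() == 100, "Sanity");
+//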
+template <class T>
+class AbstractCounter {
+
+  T _c;
+
+  // Optional name for easier reference
+  const char* const _name;
+
+  // Only allow unsigned values for now
+  STATIC_ASSERT(IsSigned<T>::value == false);
+
+public:
+
+  AbstractCounter(const char* name) : _c(0), _name(name) {}
+  AbstractCounter() : _c(0), _name("") {}
+
+  T get() const           { return _c; }
+
+  void increment() { increment_by(1); }
+  void decrement() { decrement_by(1); }
+
+  void increment_by(T v) {
+    assert(_c + v >= _c,
+        "%s overflow (" UINT64_FORMAT "+" UINT64_FORMAT ")",
+        _name, (uint64_t)_c, (uint64_t)v);
+    _c += v;
+  }
+
+  void decrement_by(T v) {
+    assert(_c - v <= _c,
+        "%s underflow (" UINT64_FORMAT "-" UINT64_FORMAT ")",
+        _name, (uint64_t)_c, (uint64_t)v);
+    _c -= v;
+  }
+
+  void reset()                { _c = 0; }
+
+#ifdef ASSERT
+  void check(T expected) const {
+    assert(_c == expected, "Counter mismatch: %d, expected: %d.",
+           (int)_c, (int)expected);
+  }
+#endif
+
+};
+
+typedef AbstractCounter<size_t>   SizeCounter;
+typedef AbstractCounter<unsigned> IntCounter;
+
+
+template <class T>
+class AbstractAtomicCounter {
+
+  volatile T _c;
+
+  // Optional name for easier reference
+  const char* const _name;
+
+  // Only allow unsigned values for now
+  STATIC_ASSERT(IsSigned<T>::value == false);
+
+public:
+
+
+  AbstractAtomicCounter(const char* name) : _c(0), _name(name) {}
+  AbstractAtomicCounter() : _c(0), _name("") {}
+
+  T get() const               { return _c; }
+
+  void increment() {
+    assert(_c + 1 != 0,
+        "%s overflow (" UINT64_FORMAT "+1)",
+        _name, (uint64_t)_c);
+    Atomic::inc(&_c);
+  }
+
+  void decrement() {
+    assert(_c >= 1,
+        "%s underflow (" UINT64_FORMAT "-1)",
+        _name, (uint64_t)_c);
+    Atomic::dec(&_c);
+  }
+
+  void increment_by(T v) {
+    T v1 = _c;
+    T v2 = Atomic::add(v, &_c);
+    assert(v2 > v1,
+           "%s overflow (" UINT64_FORMAT "+" UINT64_FORMAT ")",
+           _name, (uint64_t)v1, (uint64_t)v);
+  }
+
+  void decrement_by(T v) {
+    assert(_c >= v,
+           "%s underflow (" UINT64_FORMAT "-" UINT64_FORMAT ")",
+           _name, (uint64_t)_c, (uint64_t)v);
+    Atomic::sub(v, &_c);
+  }
+
+#ifdef ASSERT
+  void check(T expected) const {
+    assert(_c == expected, "Counter mismatch: %d, expected: %d.",
+           (int)_c, (int)expected);
+  }
+#endif
+
+};
+
+typedef AbstractAtomicCounter<size_t> SizeAtomicCounter;
+
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_COUNTER_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/internStat.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+
+#include "precompiled.hpp"
+#include "memory/metaspace/internStat.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+#ifdef ASSERT
+
+namespace metaspace {
+
+#define CREATE_COUNTER(name)          uintx InternalStats::_##name;
+#define CREATE_ATOMIC_COUNTER(name)   volatile uintx InternalStats::_##name;
+
+  ALL_MY_COUNTERS(CREATE_COUNTER, CREATE_ATOMIC_COUNTER)
+
+#undef CREATE_COUNTER
+#undef CREATE_ATOMIC_COUNTER
+
+void InternalStats::print_on(outputStream* st) {
+
+#define xstr(s) str(s)
+#define str(s) #s
+
+#define PRINTER(name)  st->print_cr("%s: " UINTX_FORMAT ".", xstr(name), _##name);
+
+  ALL_MY_COUNTERS(PRINTER, PRINTER)
+
+#undef PRINTER
+
+}
+
+} // namespace metaspace
+
+#endif // ASSERT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/internStat.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_MEMORY_METASPACE_INTERNSTAT_HPP
+#define SHARE_MEMORY_METASPACE_INTERNSTAT_HPP
+
+#ifdef ASSERT
+
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class outputStream;
+
+namespace metaspace {
+
+class InternalStats : public AllStatic {
+
+  // Note: all counters which are modified on the classloader local allocation path
+  //   (not under ExpandLock protection) have to be atomic.
+
+#define ALL_MY_COUNTERS(x, x_atomic)                \
+                                                    \
+  /* Number of allocations. */                      \
+  x_atomic(num_allocs)                              \
+                                                    \
+  /* Number of deallocations */                     \
+  x_atomic(num_deallocs)                            \
+  /* Number of times an allocation was satisfied */ \
+  /*  from deallocated blocks. */                   \
+  x_atomic(num_allocs_from_deallocated_blocks)      \
+                                                    \
+  /* Number of times an allocation failed */        \
+  /*  because the chunk was too small. */           \
+  x_atomic(num_allocs_failed_chunk_too_small)       \
+                                                    \
+  /* Number of times an allocation failed */        \
+  /*  because we hit a limit. */                    \
+  x_atomic(num_allocs_failed_limit)                 \
+                                                    \
+  /* Number of times a ClassLoaderMetaspace was */  \
+  /*  born... */                                    \
+  x(num_metaspace_births)                           \
+  /* ... and died. */                               \
+  x(num_metaspace_deaths)                           \
+                                                    \
+  /* Number of times a ClassLoaderMetaspace was */  \
+  /*  born for an anonymous class... */             \
+  x(num_anon_cld_births)                            \
+  /* ... and died. */                               \
+  x(num_anon_cld_deaths)                            \
+                                                    \
+  /* Number of times VirtualSpaceNode were */       \
+  /*  created...  */                                \
+  x(num_vsnodes_created)                            \
+  /* ... and purged. */                             \
+  x(num_vsnodes_destroyed)                          \
+                                                    \
+  /* Number of times we committed space. */         \
+  x(num_space_committed)                            \
+  /* Number of times we uncommitted space. */       \
+  x(num_space_uncommitted)                          \
+                                                    \
+  /* Number of times a chunk was returned to the */ \
+  /*  freelist (external only). */                  \
+  x(num_chunks_returned_to_freelist)                \
+  /* Number of times a chunk was taken from */      \
+  /*  freelist (external only) */                   \
+  x(num_chunks_taken_from_freelist)                 \
+                                                    \
+  /* Number of successful chunk merges */           \
+  x(num_chunk_merges)                               \
+  /* Number of chunks removed from freelist as */   \
+  /* result of a merge operation */                 \
+  x(num_chunks_removed_from_freelist_due_to_merge)  \
+                                                    \
+  /* Number of chunk splits */                      \
+  x(num_chunk_splits)                               \
+  /* Number of chunks added to freelist as */       \
+  /* result of a split operation */                 \
+  x(num_chunks_added_to_freelist_due_to_split)      \
+  /* Number of chunk in place enlargements */       \
+  x(num_chunk_enlarged)                             \
+  /* Number of chunks retired */                    \
+  x(num_chunks_retired)                             \
+                                                    \
+  /* Number of times we did a purge */              \
+  x(num_purges)                                     \
+  /* Number of times we did a wholesale uncommit */ \
+  x(num_wholesale_uncommits)
+
+
+
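+// ALL_MY_COUNTERS is an X-macro list: ALL_MY_COUNTERS(f, g) applies f to each
+// plain counter name and g to each atomic counter name. For example, the
+// instantiation below expands, among others, to:
+//
+//   static uintx _num_metaspace_births;
+//   static volatile uintx _num_allocs;
+//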
+#define DEFINE_COUNTER(name)          static uintx _##name;
+#define DEFINE_ATOMIC_COUNTER(name)   static volatile uintx _##name;
+
+  ALL_MY_COUNTERS(DEFINE_COUNTER, DEFINE_ATOMIC_COUNTER)
+
+#undef DEFINE_COUNTER
+#undef DEFINE_ATOMIC_COUNTER
+
+public:
+
+#define INCREMENTOR(name)           static void inc_##name() { _##name ++; }
+#define INCREMENTOR_ATOMIC(name)    static void inc_##name() { Atomic::inc(&_##name); }
+
+  ALL_MY_COUNTERS(INCREMENTOR, INCREMENTOR_ATOMIC)
+
+  static void print_on(outputStream* st);
+
+#undef INCREMENTOR
+#undef INCREMENTOR_ATOMIC
+
+};
+
+} // namespace metaspace
+
+#endif // ASSERT
+
+#endif // SHARE_MEMORY_METASPACE_INTERNSTAT_HPP
--- a/src/hotspot/share/memory/metaspace/metaDebug.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaDebug.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -50,8 +50,26 @@
       counter_ = 0;           \
 
 #define END_EVERY_NTH         } } }
+
+#define SOMETIMES(code) \
+    EVERY_NTH(VerifyMetaspaceInterval) \
+    { code } \
+    END_EVERY_NTH
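+
+// Example: run an expensive check only on every VerifyMetaspaceInterval-th pass
+// through the enclosing code path (as done e.g. in Metachunk::allocate):
+//
+//   SOMETIMES(verify(false);)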
+
+#define ASSERT_SOMETIMES(condition, ...) \
+    EVERY_NTH(VerifyMetaspaceInterval) \
+    assert((condition), __VA_ARGS__); \
+    END_EVERY_NTH
+
+#else
+
+#define SOMETIMES(code)
+#define ASSERT_SOMETIMES(condition, ...)
+
 #endif // ASSERT
 
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_METADEBUG_HPP
--- a/src/hotspot/share/memory/metaspace/metachunk.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metachunk.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,150 +23,503 @@
  *
  */
 
+
 #include "precompiled.hpp"
-#include "memory/allocation.hpp"
+
+#include "logging/log.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
 #include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/occupancyMap.hpp"
+#include "memory/metaspace/metaDebug.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
+#include "runtime/mutexLocker.hpp"
+
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
 namespace metaspace {
 
-size_t Metachunk::object_alignment() {
-  // Must align pointers and sizes to 8,
-  // so that 64 bit types get correctly aligned.
-  const size_t alignment = 8;
+// Make sure that the Klass alignment also agrees.
+STATIC_ASSERT(Metachunk::allocation_alignment_bytes == (size_t)KlassAlignmentInBytes);
 
-  // Make sure that the Klass alignment also agree.
-  STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes);
-
-  return alignment;
+// Return a single character representing the state ('f', 'u', 'd').
+char Metachunk::get_state_char() const {
+  switch (_state) {
+  case state_free:    return 'f';
+  case state_in_use:  return 'u';
+  case state_dead:    return 'd';
+  }
+  return '?';
 }
 
-size_t Metachunk::overhead() {
-  return align_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
+#ifdef ASSERT
+void Metachunk::assert_have_expand_lock() {
+  assert_lock_strong(MetaspaceExpand_lock);
+}
+#endif
+
+// Commit uncommitted section of the chunk.
+// Fails if we hit a commit limit.
+bool Metachunk::commit_up_to(size_t new_committed_words) {
+
+  // Please note:
+  //
+  // VirtualSpaceNode::ensure_range_is_committed(), when called over a range containing both committed and uncommitted parts,
+  // will replace the whole range with a new mapping, thus erasing the existing content in the committed parts. Therefore
+  // we must make sure never to call VirtualSpaceNode::ensure_range_is_committed() over a range containing live data.
+  //
+  // Luckily, this cannot happen by design. We have two cases:
+  //
+  // 1) chunks equal to or larger than a commit granule.
+  //    In this case, due to chunk geometry, the chunk covers whole commit granules (in other words, a chunk equal to or larger
+  //    than a commit granule will never share a granule with a neighbor). That means whatever we commit or uncommit here does
+  //    not affect neighboring chunks. We only have to take care not to re-commit used parts of the chunk itself. We do this by
+  //    moving the committed_words limit in multiples of commit granules.
+  //
+  // 2) chunks smaller than a commit granule.
+  //    In this case, a chunk shares a single commit granule with its neighbors. But this can never be a problem:
+  //    - Either the commit granule is already committed (and the neighbors may contain live data). In that case calling
+  //      ensure_range_is_committed() will do nothing.
+  //    - Or the commit granule is not committed; but in this case, the neighbors are uncommitted too and cannot contain live data.
+
+#ifdef ASSERT
+  if (word_size() >= Settings::commit_granule_words()) {
+    // case (1)
+    assert(is_aligned(base(), Settings::commit_granule_bytes()) &&
+           is_aligned(end(), Settings::commit_granule_bytes()),
+           "Chunks larger than a commit granule must cover whole granules.");
+    assert(is_aligned(_committed_words, Settings::commit_granule_words()),
+           "The commit boundary must be aligned to commit granule size");
+    assert(_used_words <= _committed_words, "Sanity");
+  } else {
+    // case (2)
+    assert(_committed_words == 0 || _committed_words == word_size(), "Sanity");
+  }
+#endif
+
+  // We should hold the expand lock at this point.
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  const size_t commit_from = _committed_words;
+  const size_t commit_to =   MIN2(align_up(new_committed_words, Settings::commit_granule_words()), word_size());
+
+  assert(commit_from >= used_words(), "Sanity");
+  assert(commit_to <= word_size(), "Sanity");
+
+  if (commit_to > commit_from) {
+    log_debug(metaspace)("Chunk " METACHUNK_FORMAT ": attempting to move commit line to "
+                         SIZE_FORMAT " words.", METACHUNK_FORMAT_ARGS(this), commit_to);
+
+    if (!_vsnode->ensure_range_is_committed(base() + commit_from, commit_to - commit_from)) {
+      DEBUG_ONLY(verify(true);)
+      return false;
+    }
+  }
+
+  // Remember how far we have committed.
+  _committed_words = commit_to;
+
+  DEBUG_ONLY(verify(true);)
+
+  return true;
+
+}
+
+
+// Ensure that chunk is committed up to at least new_committed_words words.
+// Fails if we hit a commit limit.
+bool Metachunk::ensure_committed(size_t new_committed_words) {
+
+  bool rc = true;
+
+  if (new_committed_words > committed_words()) {
+    MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+    rc = commit_up_to(new_committed_words);
+  }
+
+  return rc;
+
 }
 
-// Metachunk methods
+bool Metachunk::ensure_committed_locked(size_t new_committed_words) {
+
+  // the .._locked() variant should be called if we own the lock already.
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  bool rc = true;
+
+  if (new_committed_words > committed_words()) {
+    rc = commit_up_to(new_committed_words);
+  }
+
+  return rc;
+
+}
 
-Metachunk::Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size,
-                     VirtualSpaceNode* container)
-    : Metabase<Metachunk>(word_size),
-    _container(container),
-    _top(NULL),
-    _sentinel(CHUNK_SENTINEL),
-    _chunk_type(chunktype),
-    _is_class(is_class),
-    _origin(origin_normal),
-    _use_count(0)
-{
-  _top = initial_top();
-  set_is_tagged_free(false);
-#ifdef ASSERT
-  mangle(uninitMetaWordVal);
-  verify();
-#endif
+// Uncommit the chunk area. The area must be a whole multiple of the
+// commit granule size (in other words, we cannot uncommit chunks smaller than
+// a commit granule).
+void Metachunk::uncommit() {
+  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  uncommit_locked();
+}
+
+void Metachunk::uncommit_locked() {
+  // Only uncommit chunks which are free, have no used words set (extra precaution) and are equal to or larger in size than a single commit granule.
+  assert_lock_strong(MetaspaceExpand_lock);
+  assert(_state == state_free && _used_words == 0 && word_size() >= Settings::commit_granule_words(),
+         "Only free chunks equal or larger than commit granule size can be uncommitted "
+         "(chunk " METACHUNK_FULL_FORMAT ").", METACHUNK_FULL_FORMAT_ARGS(this));
+  if (word_size() >= Settings::commit_granule_words()) {
+    _vsnode->uncommit_range(base(), word_size());
+    _committed_words = 0;
+  }
+}
+
+void Metachunk::set_committed_words(size_t v) {
+  // Set committed words. Since we know that we only commit whole commit granules, we can round up v here.
+  v = MIN2(align_up(v, Settings::commit_granule_words()), word_size());
+  _committed_words = v;
 }
 
-MetaWord* Metachunk::allocate(size_t word_size) {
-  MetaWord* result = NULL;
-  // If available, bump the pointer to allocate.
-  if (free_word_size() >= word_size) {
-    result = _top;
-    _top = _top + word_size;
+// Allocate word_size words from this chunk.
+//
+// May cause memory to be committed. That may fail if we hit a commit limit. In that case,
+//  NULL is returned and p_did_hit_commit_limit will be set to true.
+// If the remainder portion of the chunk was too small to hold the allocation,
+//  NULL is returned and p_did_hit_commit_limit will be set to false.
+MetaWord* Metachunk::allocate(size_t request_word_size, bool* p_did_hit_commit_limit) {
+
+  assert_is_aligned(request_word_size, allocation_alignment_words);
+
+  log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": allocating " SIZE_FORMAT " words.",
+                       METACHUNK_FULL_FORMAT_ARGS(this), request_word_size);
+
+  assert(committed_words() <= word_size(), "Sanity");
+
+  if (free_below_committed_words() < request_word_size) {
+
+    // We may need to expand the committed area...
+    if (free_words() < request_word_size) {
+      // ... but cannot do this since we ran out of space.
+      *p_did_hit_commit_limit = false;
+      log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": .. does not fit (remaining space: "
+                           SIZE_FORMAT " words).", METACHUNK_FULL_FORMAT_ARGS(this), free_words());
+      return NULL;
+    }
+
+    log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": .. attempting to increase committed range.",
+                         METACHUNK_FULL_FORMAT_ARGS(this));
+
+    if (!ensure_committed(used_words() + request_word_size)) {
+
+      // Commit failed. We may have hit the commit limit or the gc threshold.
+      *p_did_hit_commit_limit = true;
+      log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": .. failed, we hit a limit.",
+                           METACHUNK_FULL_FORMAT_ARGS(this));
+      return NULL;
+
+    }
+
   }
-  return result;
+
+  assert(committed_words() >= request_word_size, "Sanity");
+
+  MetaWord* const p = top();
+
+  _used_words += request_word_size;
+
+  SOMETIMES(verify(false);)
+
+  return p;
+
 }
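+
+// Typical caller pattern for allocate() (an illustrative sketch, not actual
+// SpaceManager code):
+//
+//   bool hit_limit = false;
+//   MetaWord* p = chunk->allocate(word_size, &hit_limit);
+//   if (p == NULL) {
+//     if (hit_limit) {
+//       // Hit the commit limit or the GC threshold: trigger GC or report OOM.
+//     } else {
+//       // Chunk is exhausted: retire it and allocate from a new chunk.
+//     }
+//   }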
 
-// _bottom points to the start of the chunk including the overhead.
-size_t Metachunk::used_word_size() const {
-  return pointer_delta(_top, bottom(), sizeof(MetaWord));
-}
+// Given a memory range which may or may not have been allocated from this chunk, attempt
+// to roll its allocation back. This can work if this is the very last allocation we did
+// from this chunk, in which case we just lower the top pointer again.
+// Returns true if this succeeded, false if it failed.
+bool Metachunk::attempt_rollback_allocation(MetaWord* p, size_t word_size) {
+  assert(p != NULL && word_size > 0, "Sanity");
+  assert(is_in_use() && base() != NULL, "Sanity");
 
-size_t Metachunk::free_word_size() const {
-  return pointer_delta(end(), _top, sizeof(MetaWord));
+  // Is this allocation at the top?
+  if (used_words() >= word_size &&
+      base() + (used_words() - word_size) == p) {
+    log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": rolling back allocation...",
+                         METACHUNK_FULL_FORMAT_ARGS(this));
+    _used_words -= word_size;
+    log_trace(metaspace)("Chunk " METACHUNK_FULL_FORMAT ": rolled back allocation.",
+                         METACHUNK_FULL_FORMAT_ARGS(this));
+    DEBUG_ONLY(verify(false);)
+
+    return true;
+
+  }
+
+  return false;
 }
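+
+// Illustrative pairing of allocate() and attempt_rollback_allocation() (a sketch;
+// "initialize_block" is a hypothetical follow-up step that may fail):
+//
+//   MetaWord* p = chunk->allocate(word_size, &hit_limit);
+//   if (p != NULL && !initialize_block(p)) {
+//     // If this was the most recent allocation from the chunk, the memory is
+//     // handed back by lowering the top pointer again.
+//     chunk->attempt_rollback_allocation(p, word_size);
+//   }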
 
-void Metachunk::print_on(outputStream* st) const {
-  st->print_cr("Metachunk:"
-               " bottom " PTR_FORMAT " top " PTR_FORMAT
-               " end " PTR_FORMAT " size " SIZE_FORMAT " (%s)",
-               p2i(bottom()), p2i(_top), p2i(end()), word_size(),
-               chunk_size_name(get_chunk_type()));
-  if (Verbose) {
-    st->print_cr("    used " SIZE_FORMAT " free " SIZE_FORMAT,
-                 used_word_size(), free_word_size());
+
+#ifdef ASSERT
+
+// Zap this structure.
+void Metachunk::zap_header(uint8_t c) {
+  memset(this, c, sizeof(Metachunk));
+}
+
+void Metachunk::fill_with_pattern(MetaWord pattern, size_t word_size) {
+  assert(word_size <= committed_words(), "Sanity");
+  for (size_t l = 0; l < word_size; l ++) {
+    _base[l] = pattern;
   }
 }
 
-#ifdef ASSERT
-void Metachunk::mangle(juint word_value) {
-  // Overwrite the payload of the chunk and not the links that
-  // maintain list of chunks.
-  HeapWord* start = (HeapWord*)initial_top();
-  size_t size = word_size() - overhead();
-  Copy::fill_to_words(start, size, word_value);
+void Metachunk::check_pattern(MetaWord pattern, size_t word_size) {
+  assert(word_size <= committed_words(), "Sanity");
+  for (size_t l = 0; l < word_size; l ++) {
+    assert(_base[l] == pattern,
+           "chunk " METACHUNK_FULL_FORMAT ": pattern change at " PTR_FORMAT ": expected " UINTX_FORMAT " but got " UINTX_FORMAT ".",
+           METACHUNK_FULL_FORMAT_ARGS(this), p2i(_base + l), (uintx)pattern, (uintx)_base[l]);
+  }
 }
 
-void Metachunk::verify() const {
-  assert(is_valid_sentinel(), "Chunk " PTR_FORMAT ": sentinel invalid", p2i(this));
-  const ChunkIndex chunk_type = get_chunk_type();
-  assert(is_valid_chunktype(chunk_type), "Chunk " PTR_FORMAT ": Invalid chunk type.", p2i(this));
-  if (chunk_type != HumongousIndex) {
-    assert(word_size() == get_size_for_nonhumongous_chunktype(chunk_type, is_class()),
-           "Chunk " PTR_FORMAT ": wordsize " SIZE_FORMAT " does not fit chunk type %s.",
-           p2i(this), word_size(), chunk_size_name(chunk_type));
+
+// Verifies linking with neighbors in virtual space.
+// Can only be done under expand lock protection.
+void Metachunk::verify_neighborhood() const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+  assert(!is_dead(), "Do not call on dead chunks.");
+
+  if (is_root_chunk()) {
+
+    // Root chunks are all alone in the world.
+    assert(next_in_vs() == NULL && prev_in_vs() == NULL, "Root chunks should have no neighbors");
+
+  } else {
+
+    // Non-root chunks have neighbors, at least one, possibly two.
+
+    assert(next_in_vs() != NULL || prev_in_vs() != NULL,
+           "A non-root chunk should have neighbors (chunk @" PTR_FORMAT
+           ", base " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
+           p2i(this), p2i(base()), level());
+
+    if (prev_in_vs() != NULL) {
+      assert(prev_in_vs()->end() == base(),
+             "Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to predecessor: " METACHUNK_FULL_FORMAT ".",
+             METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()));
+      assert(prev_in_vs()->next_in_vs() == this,
+             "Chunk " METACHUNK_FULL_FORMAT ": broken link to left neighbor: " METACHUNK_FULL_FORMAT " (" PTR_FORMAT ").",
+             METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()), p2i(prev_in_vs()->next_in_vs()));
+    }
+
+    if (next_in_vs() != NULL) {
+      assert(end() == next_in_vs()->base(),
+             "Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to successor: " METACHUNK_FULL_FORMAT ".",
+             METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(next_in_vs()));
+      assert(next_in_vs()->prev_in_vs() == this,
+             "Chunk " METACHUNK_FULL_FORMAT ": broken link to right neighbor: " METACHUNK_FULL_FORMAT " (" PTR_FORMAT ").",
+             METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(next_in_vs()), p2i(next_in_vs()->prev_in_vs()));
+    }
+
+    // One of the neighbors must be the buddy. It can be whole or splintered.
+
+    // The chunk following us or preceding us may be our buddy or a splintered part of it.
+    Metachunk* buddy = is_leader() ? next_in_vs() : prev_in_vs();
+
+    assert(buddy != NULL, "Missing neighbor.");
+    assert(!buddy->is_dead(), "Invalid buddy state.");
+
+    // This neighbor is either our buddy (same level) or a splinter of our buddy - hence
+    // the level can never be smaller (aka the chunk size cannot be larger).
+    assert(buddy->level() >= level(), "Wrong level.");
+
+    if (buddy->level() == level()) {
+
+      // If the buddy is of the same size as us, it is unsplintered.
+      assert(buddy->is_leader() == !is_leader(),
+             "Only one chunk can be leader in a pair");
+
+      // When direct buddies are neighbors, one or both should be in use, otherwise they should
+      // have been merged.
+
+      // But since this verification function is called from internal functions where we are
+      // about to merge or have just split, do not test this here. We have
+      // RootChunkArea::verify_area_is_ideally_merged() for testing that.
+
+      // assert(buddy->is_in_use() || is_in_use(), "incomplete merging?");
+
+      if (is_leader()) {
+        assert(buddy->base() == end(), "Sanity");
+        assert(is_aligned(base(), word_size() * 2 * BytesPerWord), "Sanity");
+      } else {
+        assert(buddy->end() == base(), "Sanity");
+        assert(is_aligned(buddy->base(), word_size() * 2 * BytesPerWord), "Sanity");
+      }
+
+    } else {
+
+      // Buddy, but splintered, and this is a part of it.
+      if (is_leader()) {
+        assert(buddy->base() == end(), "Sanity");
+      } else {
+        assert(buddy->end() > (base() - word_size()), "Sanity");
+      }
+
+    }
   }
-  assert(is_valid_chunkorigin(get_origin()), "Chunk " PTR_FORMAT ": Invalid chunk origin.", p2i(this));
-  assert(bottom() <= _top && _top <= (MetaWord*)end(),
-         "Chunk " PTR_FORMAT ": Chunk top out of chunk bounds.", p2i(this));
+}
+
+volatile MetaWord dummy = 0;
+
+void Metachunk::verify(bool slow) const {
+
+  // Note. This should be called under CLD lock protection.
+
+  // We can verify everything except the _prev_in_vs/_next_in_vs pair.
+  // This is because neighbor chunks may be added concurrently, so we cannot rely
+  //  on the content of _next_in_vs/_prev_in_vs unless we have the expand lock.
+
+  assert(!is_dead(), "Do not call on dead chunks.");
+
+  // Note: only call this on a live Metachunk.
+  chklvl::check_valid_level(level());
+
+  assert(committed_words() >= used_words(),
+         "mismatch: committed: " SIZE_FORMAT ", used: " SIZE_FORMAT ".",
+         committed_words(), used_words());
+
+  assert(word_size() >= committed_words(),
+         "mismatch: word_size: " SIZE_FORMAT ", committed: " SIZE_FORMAT ".",
+         word_size(), committed_words());
+
+  // Test base pointer
+  assert(base() != NULL, "Base pointer NULL");
+  assert(vsnode() != NULL, "No space");
+  vsnode()->check_pointer(base());
+
+  // Starting address shall be aligned to chunk size.
+  const size_t required_alignment = word_size() * sizeof(MetaWord);
+  assert_is_aligned(base(), required_alignment);
 
-  // For non-humongous chunks, starting address shall be aligned
-  // to its chunk size. Humongous chunks start address is
-  // aligned to specialized chunk size.
-  const size_t required_alignment =
-    (chunk_type != HumongousIndex ? word_size() : get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class())) * sizeof(MetaWord);
-  assert(is_aligned((address)this, required_alignment),
-         "Chunk " PTR_FORMAT ": (size " SIZE_FORMAT ") not aligned to " SIZE_FORMAT ".",
-         p2i(this), word_size() * sizeof(MetaWord), required_alignment);
+  // If slow, touch one word per page of the committed area to verify it is
+  // actually accessible.
+  if (slow && _committed_words > 0) {
+    const size_t words_per_page = os::vm_page_size() / BytesPerWord;
+    for (const MetaWord* p = _base; p < _base + _committed_words; p += words_per_page) {
+      dummy = *p;
+    }
+    dummy = *(_base + _committed_words - 1);
+  }
+
+}
+#endif // ASSERT
+
+void Metachunk::print_on(outputStream* st) const {
+
+  // Note: must also work with invalid/random data.
+  st->print("Chunk @" PTR_FORMAT ", state %c, base " PTR_FORMAT ", "
+            "level " CHKLVL_FORMAT " (" SIZE_FORMAT " words), "
+            "used " SIZE_FORMAT " words, committed " SIZE_FORMAT " words.",
+            p2i(this), get_state_char(), p2i(base()), level(),
+            (chklvl::is_valid_level(level()) ? chklvl::word_size_for_level(level()) : 0),
+            used_words(), committed_words());
+
+}
+
+///////////////////////////////////
+// MetachunkList
+
+#ifdef ASSERT
+
+bool MetachunkList::contains(const Metachunk* c) const {
+  for (Metachunk* c2 = first(); c2 != NULL; c2 = c2->next()) {
+    if (c == c2) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void MetachunkList::verify() const {
+  int num = 0;
+  const Metachunk* last_c = NULL;
+  for (const Metachunk* c = first(); c != NULL; c = c->next()) {
+    num ++;
+    assert(c->prev() == last_c,
+           "Broken link to predecessor. Chunk " METACHUNK_FULL_FORMAT ".",
+           METACHUNK_FULL_FORMAT_ARGS(c));
+    c->verify(false);
+    last_c = c;
+  }
+  _num.check(num);
 }
 
 #endif // ASSERT
 
-// Helper, returns a descriptive name for the given index.
-const char* chunk_size_name(ChunkIndex index) {
-  switch (index) {
-    case SpecializedIndex:
-      return "specialized";
-    case SmallIndex:
-      return "small";
-    case MediumIndex:
-      return "medium";
-    case HumongousIndex:
-      return "humongous";
-    default:
-      return "Invalid index";
+void MetachunkList::print_on(outputStream* st) const {
+
+  if (_num.get() > 0) {
+    for (const Metachunk* c = first(); c != NULL; c = c->next()) {
+      st->print(" - <");
+      c->print_on(st);
+      st->print(">");
+    }
+    st->print(" - total : %d chunks.", _num.get());
+  } else {
+    st->print("empty");
   }
+
+}
+
+///////////////////////////////////
+// MetachunkListCluster
+
+#ifdef ASSERT
+
+bool MetachunkListCluster::contains(const Metachunk* c) const {
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    if (list_for_level(l)->contains(c)) {
+      return true;
+    }
+  }
+  return false;
 }
 
-#ifdef ASSERT
-void do_verify_chunk(Metachunk* chunk) {
-  guarantee(chunk != NULL, "Sanity");
-  // Verify chunk itself; then verify that it is consistent with the
-  // occupany map of its containing node.
-  chunk->verify();
-  VirtualSpaceNode* const vsn = chunk->container();
-  OccupancyMap* const ocmap = vsn->occupancy_map();
-  ocmap->verify_for_chunk(chunk);
+void MetachunkListCluster::verify(bool slow) const {
+
+  int num = 0; size_t word_size = 0;
+
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+
+    // Check that each chunk in this list is filed under its correct level.
+    for (const Metachunk* c = first_at_level(l); c != NULL; c = c->next()) {
+      assert(c->level() == l, "Chunk in wrong list.");
+    }
+
+    // Check each list.
+    list_for_level(l)->verify();
+
+    num += list_for_level(l)->size();
+    word_size += list_for_level(l)->size() * chklvl::word_size_for_level(l);
+  }
+  _total_num_chunks.check(num);
+  _total_word_size.check(word_size);
+
 }
-#endif
+#endif // ASSERT
+
+void MetachunkListCluster::print_on(outputStream* st) const {
 
-void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
-  chunk->set_is_tagged_free(!inuse);
-  OccupancyMap* const ocmap = chunk->container()->occupancy_map();
-  ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    st->print("-- List[" CHKLVL_FORMAT "]: ", l);
+    list_for_level(l)->print_on(st);
+    st->cr();
+  }
+  st->print_cr("total chunks: %d, total word size: " SIZE_FORMAT ".", _total_num_chunks.get(), _total_word_size.get());
+
 }
 
 } // namespace metaspace
--- a/src/hotspot/share/memory/metaspace/metachunk.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metachunk.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,12 +25,14 @@
 #ifndef SHARE_MEMORY_METASPACE_METACHUNK_HPP
 #define SHARE_MEMORY_METASPACE_METACHUNK_HPP
 
-#include "memory/metaspace/metabase.hpp"
-#include "memory/metaspace/metaspaceCommon.hpp"
+
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-class MetachunkTest;
+
+class outputStream;
 
 namespace metaspace {
 
@@ -39,135 +42,379 @@
 //    Metachunks are reused (when freed are put on a global freelist) and
 //    have no permanent association to a SpaceManager.
 
-//            +--------------+ <- end    --+       --+
-//            |              |             |         |
-//            |              |             | free    |
-//            |              |             |         |
-//            |              |             |         | size | capacity
-//            |              |             |         |
-//            |              | <- top   -- +         |
-//            |              |             |         |
-//            |              |             | used    |
-//            |              |             |         |
-//            |              |             |         |
-//            +--------------+ <- bottom --+       --+
+//            +--------------+ <- end    ----+         --+
+//            |              |               |           |
+//            |              |               | free      |
+//            |              |               |           |
+//            |              |               |           | size (aka capacity)
+//            |              |               |           |
+//            | -----------  | <- top     -- +           |
+//            |              |               |           |
+//            |              |               | used      |
+//            +--------------+ <- start   -- +        -- +
+
+// Note: this is a chunk **descriptor**. The real payload area lives in metaspace;
+// the descriptor object itself lives elsewhere (in the chunk header pool).
+class Metachunk {
+
+  // start of chunk memory; NULL if dead.
+  MetaWord* _base;
+
+  // Used words.
+  size_t _used_words;
+
+  // Size of the region, starting from base, which is guaranteed to be committed. In words.
+  //  The actual size of committed regions may be larger, but it may be fragmented.
+  //
+  //  (This is a performance optimization. The underlying VirtualSpaceNode knows
+  //  which granules are committed; but we want to avoid asking it unnecessarily
+  //  in Metachunk::allocate().)
+  size_t _committed_words;
+
+  chklvl_t _level; // aka size.
+
+  // state_free:    free, owned by ChunkManager
+  // state_in_use:  in-use, owned by SpaceManager
+  // state_dead:    just a hollow chunk header without associated memory, owned
+  //                by chunk header pool.
+  enum state_t {
+    state_free = 0,
+    state_in_use = 1,
+    state_dead = 2
+  };
+  state_t _state;
+
+  // Unfortunately we need a back link to the virtual space node
+  // for splitting and merging chunks.
+  VirtualSpaceNode* _vsnode;
+
 
-enum ChunkOrigin {
-  // Chunk normally born (via take_from_committed)
-  origin_normal = 1,
-  // Chunk was born as padding chunk
-  origin_pad = 2,
-  // Chunk was born as leftover chunk in VirtualSpaceNode::retire
-  origin_leftover = 3,
-  // Chunk was born as result of a merge of smaller chunks
-  origin_merge = 4,
-  // Chunk was born as result of a split of a larger chunk
-  origin_split = 5,
+  // A chunk header is kept in a list:
+  // - in the list of used chunks inside a SpaceManager, if it is in use
+  // - in the list of free chunks inside a ChunkManager, if it is free
+  // - in the freelist of unused headers inside the ChunkHeaderPool,
+  //   if it is unused (e.g. result of chunk merging) and has no associated
+  //   memory area.
+  Metachunk* _prev;
+  Metachunk* _next;
 
-  origin_minimum = origin_normal,
-  origin_maximum = origin_split,
-  origins_count = origin_maximum + 1
-};
+  // Furthermore, we keep, per chunk, information about the neighboring chunks.
+  // This is needed to split and merge chunks.
+  //
+  // Note: These members can be modified concurrently while a chunk is alive and in use.
+  // This can happen if a neighboring chunk is added or removed.
+  // Therefore, these members must only be read or modified under expand lock protection.
+  Metachunk* _prev_in_vs;
+  Metachunk* _next_in_vs;
+
+  MetaWord* top() const           { return base() + _used_words; }
+
+  // Commit uncommitted section of the chunk.
+  // Fails if we hit a commit limit.
+  bool commit_up_to(size_t new_committed_words);
+
+  DEBUG_ONLY(static void assert_have_expand_lock();)
 
-inline bool is_valid_chunkorigin(ChunkOrigin origin) {
-  return origin == origin_normal ||
-    origin == origin_pad ||
-    origin == origin_leftover ||
-    origin == origin_merge ||
-    origin == origin_split;
-}
+public:
+
+  Metachunk()
+    : _base(NULL),
+      _used_words(0),
+      _committed_words(0),
+      _level(chklvl::ROOT_CHUNK_LEVEL),
+      _state(state_free),
+      _vsnode(NULL),
+      _prev(NULL), _next(NULL),
+      _prev_in_vs(NULL), _next_in_vs(NULL)
+  {}
 
-class Metachunk : public Metabase<Metachunk> {
+  size_t word_size() const        { return chklvl::word_size_for_level(_level); }
 
-  friend class ::MetachunkTest;
+  MetaWord* base() const          { return _base; }
+//  void set_base(MetaWord* p)      { _base = p; }
+  MetaWord* end() const           { return base() + word_size(); }
 
-  // The VirtualSpaceNode containing this chunk.
-  VirtualSpaceNode* const _container;
+  // Chunk list wiring
+  void set_prev(Metachunk* c)     { _prev = c; }
+  Metachunk* prev() const         { return _prev; }
+  void set_next(Metachunk* c)     { _next = c; }
+  Metachunk* next() const         { return _next; }
 
-  // Current allocation top.
-  MetaWord* _top;
+  DEBUG_ONLY(bool in_list() const { return _prev != NULL || _next != NULL; })
 
-  // A 32bit sentinel for debugging purposes.
-  enum { CHUNK_SENTINEL = 0x4d4554EF,  // "MET"
-         CHUNK_SENTINEL_INVALID = 0xFEEEEEEF
-  };
+  // Physical neighbors wiring
+  void set_prev_in_vs(Metachunk* c) { DEBUG_ONLY(assert_have_expand_lock()); _prev_in_vs = c; }
+  Metachunk* prev_in_vs() const     { DEBUG_ONLY(assert_have_expand_lock()); return _prev_in_vs; }
+  void set_next_in_vs(Metachunk* c) { DEBUG_ONLY(assert_have_expand_lock()); _next_in_vs = c; }
+  Metachunk* next_in_vs() const     { DEBUG_ONLY(assert_have_expand_lock()); return _next_in_vs; }
 
-  uint32_t _sentinel;
+  bool is_free() const            { return _state == state_free; }
+  bool is_in_use() const          { return _state == state_in_use; }
+  bool is_dead() const            { return _state == state_dead; }
+  void set_free()                 { _state = state_free; }
+  void set_in_use()               { _state = state_in_use; }
+  void set_dead()                 { _state = state_dead; }
 
-  const ChunkIndex _chunk_type;
-  const bool _is_class;
-  // Whether the chunk is free (in freelist) or in use by some class loader.
-  bool _is_tagged_free;
+  // Return a single char presentation of the state ('f', 'u', 'd')
+  char get_state_char() const;
+
+  void inc_level()                { _level ++; DEBUG_ONLY(chklvl::check_valid_level(_level);) }
+  void dec_level()                { _level --; DEBUG_ONLY(chklvl::check_valid_level(_level);) }
+//  void set_level(chklvl_t v)      { _level = v; DEBUG_ONLY(chklvl::check_valid_level(_level);) }
+  chklvl_t level() const          { return _level; }
 
-  ChunkOrigin _origin;
-  int _use_count;
+  // Convenience functions for extreme levels.
+  bool is_root_chunk() const      { return chklvl::ROOT_CHUNK_LEVEL == _level; }
+  bool is_leaf_chunk() const      { return chklvl::HIGHEST_CHUNK_LEVEL == _level; }
 
-  MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
-  MetaWord* top() const         { return _top; }
+  VirtualSpaceNode* vsnode() const        { return _vsnode; }
+//  void set_vsnode(VirtualSpaceNode* n)    { _vsnode = n; }
+
+  size_t used_words() const                   { return _used_words; }
+  size_t free_words() const                   { return word_size() - used_words(); }
+  size_t free_below_committed_words() const   { return committed_words() - used_words(); }
+  void reset_used_words()                     { _used_words = 0; }
 
- public:
-  // Metachunks are allocated out of a MetadataVirtualSpace and
-  // and use some of its space to describe itself (plus alignment
-  // considerations).  Metadata is allocated in the rest of the chunk.
-  // This size is the overhead of maintaining the Metachunk within
-  // the space.
+  size_t committed_words() const      { return _committed_words; }
+  void set_committed_words(size_t v);
+  bool is_fully_committed() const     { return committed_words() == word_size(); }
+  bool is_fully_uncommitted() const   { return committed_words() == 0; }
+
+  // Ensure that chunk is committed up to at least new_committed_words words.
+  // Fails if we hit a commit limit.
+  bool ensure_committed(size_t new_committed_words);
+  bool ensure_committed_locked(size_t new_committed_words);
+
+  bool ensure_fully_committed()           { return ensure_committed(word_size()); }
+  bool ensure_fully_committed_locked()    { return ensure_committed_locked(word_size()); }
 
-  // Alignment of each allocation in the chunks.
-  static size_t object_alignment();
+  // Uncommit the chunk area. The area must be a whole multiple of the
+  // commit granule size (in other words, chunks smaller than a single
+  // commit granule cannot be uncommitted).
+  void uncommit();
+  void uncommit_locked();
 
-  // Size of the Metachunk header, in words, including alignment.
-  static size_t overhead();
+  // Alignment of an allocation.
+  static const size_t allocation_alignment_bytes = 8;
+  static const size_t allocation_alignment_words = allocation_alignment_bytes / BytesPerWord;
+
+  // Allocation from a chunk
 
-  Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size, VirtualSpaceNode* container);
-
-  MetaWord* allocate(size_t word_size);
+  // Allocate word_size words from this chunk (word_size must be aligned to
+  //  allocation_alignment_words).
+  //
+  // May cause memory to be committed. That may fail if we hit a commit limit. In that case,
+  //  NULL is returned and p_did_hit_commit_limit will be set to true.
+  // If the remainder portion of the chunk was too small to hold the allocation,
+  //  NULL is returned and p_did_hit_commit_limit will be set to false.
+  MetaWord* allocate(size_t request_word_size, bool* p_did_hit_commit_limit);
 
-  VirtualSpaceNode* container() const { return _container; }
-
-  MetaWord* bottom() const { return (MetaWord*) this; }
+  // Given a memory range which may or may not have been allocated from this chunk, attempt
+  // to roll its allocation back. This can work if this is the very last allocation we did
+  // from this chunk, in which case we just lower the top pointer again.
+  // Returns true if this succeeded, false if it failed.
+  bool attempt_rollback_allocation(MetaWord* p, size_t word_size);
 
-  // Reset top to bottom so chunk can be reused.
-  void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
-  bool is_empty() { return _top == initial_top(); }
+  // Initialize structure for reuse.
+  void initialize(VirtualSpaceNode* node, MetaWord* base, chklvl_t lvl) {
+    _vsnode = node; _base = base; _level = lvl;
+    _used_words = _committed_words = 0; _state = state_free;
+    _next = _prev = _next_in_vs = _prev_in_vs = NULL;
+  }
+
+  // Returns true if this chunk is the leader in its buddy pair, false if not.
+  // Must not be called for root chunks.
+  bool is_leader() const {
+    assert(!is_root_chunk(), "Root chunks have no buddy.");
+    // I am sure this can be done smarter...
+    return is_aligned(base(), chklvl::word_size_for_level(level() - 1) * BytesPerWord);
+  }
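+
+  // Example (illustrative): a chunk is the leader of its buddy pair iff its base
+  // address is aligned to twice its own byte size; the leader's buddy then
+  // starts right at its end().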
 
-  // used (has been allocated)
-  // free (available for future allocations)
-  size_t word_size() const { return size(); }
-  size_t used_word_size() const;
-  size_t free_word_size() const;
+  //// Debug stuff ////
+#ifdef ASSERT
+  void verify(bool slow) const;
+  // Verifies linking with neighbors in virtual space. Needs expand lock protection.
+  void verify_neighborhood() const;
+  void zap_header(uint8_t c = 0x17);
+  void fill_with_pattern(MetaWord pattern, size_t word_size);
+  void check_pattern(MetaWord pattern, size_t word_size);
 
-  bool is_tagged_free() { return _is_tagged_free; }
-  void set_is_tagged_free(bool v) { _is_tagged_free = v; }
-
-  bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
+  // Returns true if pointer points into the used area of this chunk.
+  bool is_valid_pointer(const MetaWord* p) const {
+    return base() <= p && p < top();
+  }
+#endif // ASSERT
 
   void print_on(outputStream* st) const;
 
-  bool is_valid_sentinel() const        { return _sentinel == CHUNK_SENTINEL; }
-  void remove_sentinel()                { _sentinel = CHUNK_SENTINEL_INVALID; }
+};
+
+// Little print helpers: since we often print out chunks, here are some convenience macros.
+#define METACHUNK_FORMAT                "@" PTR_FORMAT ", %c, base " PTR_FORMAT ", level " CHKLVL_FORMAT
+#define METACHUNK_FORMAT_ARGS(chunk)    p2i(chunk), chunk->get_state_char(), p2i(chunk->base()), chunk->level()
+
+#define METACHUNK_FULL_FORMAT                "@" PTR_FORMAT ", %c, base " PTR_FORMAT ", level " CHKLVL_FORMAT " (" SIZE_FORMAT "), used: " SIZE_FORMAT ", committed: " SIZE_FORMAT
+#define METACHUNK_FULL_FORMAT_ARGS(chunk)    p2i(chunk), chunk->get_state_char(), p2i(chunk->base()), chunk->level(), chunk->word_size(), chunk->used_words(), chunk->committed_words()
+
+/////////
+// A list of Metachunks.
+class MetachunkList {
+
+  Metachunk* _first;
+
+  // Number of chunks
+  IntCounter _num;
+
+public:
+
+  MetachunkList() : _first(NULL), _num() {}
+
+  Metachunk* first() const { return _first; }
+  int size() const { return _num.get(); }
+
+  void add(Metachunk* c) {
+    assert(!c->in_list(), "Chunk must not be in a list");
+    if (_first) {
+      _first->set_prev(c);
+    }
+    c->set_next(_first);
+    c->set_prev(NULL);
+    _first = c;
+    _num.increment();
+  }
 
-  int get_use_count() const             { return _use_count; }
-  void inc_use_count()                  { _use_count ++; }
+  // Remove first node unless empty. Returns node or NULL.
+  Metachunk* remove_first() {
+    Metachunk* c = _first;
+    if (c != NULL) {
+      assert(c->prev() == NULL, "Sanity");
+      Metachunk* c2 = c->next();
+      if (c2 != NULL) {
+        c2->set_prev(NULL);
+      }
+      _first = c2;
+      c->set_next(NULL);
+      _num.decrement();
+    }
+    return c;
+  }
+
+  // Remove given chunk from list. List must contain that chunk.
+  void remove(Metachunk* c) {
+    assert(contains(c), "List does not contain this chunk");
+    if (_first == c) {
+      _first = c->next();
+      if (_first != NULL) {
+        _first->set_prev(NULL);
+      }
+    } else {
+      if (c->next() != NULL) {
+        c->next()->set_prev(c->prev());
+      }
+      if (c->prev() != NULL) {
+        c->prev()->set_next(c->next());
+      }
+    }
+    c->set_prev(NULL);
+    c->set_next(NULL);
+    _num.decrement();
+  }
+
+#ifdef ASSERT
+  bool contains(const Metachunk* c) const;
+  void verify() const;
+#endif
 
-  ChunkOrigin get_origin() const        { return _origin; }
-  void set_origin(ChunkOrigin orig)     { _origin = orig; }
+  // Returns size, in words, of committed space of all chunks in this list.
+  // Note: walks list.
+  size_t committed_word_size() const {
+    size_t l = 0;
+    for (const Metachunk* c = _first; c != NULL; c = c->next()) {
+      l += c->committed_words();
+    }
+    return l;
+  }
+
+  void print_on(outputStream* st) const;
+
+};
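+
+// Usage sketch (illustrative): MetachunkList behaves like a LIFO stack --
+//
+//   MetachunkList l;
+//   l.add(c1);
+//   l.add(c2);
+//   Metachunk* c = l.remove_first();  // returns c2, the most recently added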
+
+//////////////////
+// A cluster of Metachunk Lists, one for each chunk level, together with associated counters.
+class MetachunkListCluster {
+
+  MetachunkList _lists[chklvl::NUM_CHUNK_LEVELS];
+  SizeCounter   _total_word_size;
+  IntCounter    _total_num_chunks;
+
+  const MetachunkList* list_for_level(chklvl_t lvl) const         { DEBUG_ONLY(chklvl::check_valid_level(lvl)); return _lists + lvl; }
+  MetachunkList* list_for_level(chklvl_t lvl)                     { DEBUG_ONLY(chklvl::check_valid_level(lvl)); return _lists + lvl; }
+
+  const MetachunkList* list_for_chunk(const Metachunk* c) const   { return list_for_level(c->level()); }
+  MetachunkList* list_for_chunk(const Metachunk* c)               { return list_for_level(c->level()); }
+
+public:
+
+  const Metachunk* first_at_level(chklvl_t lvl) const   { return list_for_level(lvl)->first(); }
+  Metachunk* first_at_level(chklvl_t lvl)               { return list_for_level(lvl)->first(); }
+
+  // Remove given chunk from its list. List must contain that chunk.
+  void remove(Metachunk* c) {
+    list_for_chunk(c)->remove(c);
+    _total_word_size.decrement_by(c->word_size());
+    _total_num_chunks.decrement();
+  }
 
-  ChunkIndex get_chunk_type() const     { return _chunk_type; }
-  bool is_class() const                 { return _is_class; }
+  // Remove first node unless empty. Returns node or NULL.
+  Metachunk* remove_first(chklvl_t lvl) {
+    Metachunk* c = list_for_level(lvl)->remove_first();
+    if (c != NULL) {
+      _total_word_size.decrement_by(c->word_size());
+      _total_num_chunks.decrement();
+    }
+    return c;
+  }
+
+  void add(Metachunk* c) {
+    list_for_chunk(c)->add(c);
+    _total_word_size.increment_by(c->word_size());
+    _total_num_chunks.increment();
+  }
+
+  // Returns number of chunks for a given level.
+  int num_chunks_at_level(chklvl_t lvl) const {
+    return list_for_level(lvl)->size();
+  }
 
-  DEBUG_ONLY(void mangle(juint word_value);)
-  DEBUG_ONLY(void verify() const;)
+  // Returns committed word size, in total, of all chunks at a given level.
+  size_t committed_word_size_at_level(chklvl_t lvl) const {
+    return list_for_level(lvl)->committed_word_size();
+  }
+
+  // Returns word size, in total, of all chunks in all lists.
+  size_t total_word_size() const          { return _total_word_size.get(); }
+
+  // Returns number of chunks in total
+  int total_num_chunks() const            { return _total_num_chunks.get(); }
+
+  // Returns size, in words, of committed space of all chunks in all lists.
+  // Note: walks lists.
+  size_t total_committed_word_size() const {
+    size_t sum = 0;
+    for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+      sum += list_for_level(l)->committed_word_size();
+    }
+    return sum;
+  }
+
+  DEBUG_ONLY(void verify(bool slow) const;)
+  DEBUG_ONLY(bool contains(const Metachunk* c) const;)
+
+  void print_on(outputStream* st) const;
 
 };
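+
+// Usage sketch (illustrative): a ChunkManager can keep its free chunks in such a
+// cluster, one list per chunk level --
+//
+//   MetachunkListCluster freelists;
+//   freelists.add(c);                             // filed under c->level()
+//   Metachunk* c2 = freelists.remove_first(lvl);  // NULL if list for lvl is empty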
 
 
-// Helper function that does a bunch of checks for a chunk.
-DEBUG_ONLY(void do_verify_chunk(Metachunk* chunk);)
-
-// Given a Metachunk, update its in-use information (both in the
-// chunk and the occupancy map).
-void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
-
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_METACHUNK_HPP
--- a/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,8 +33,6 @@
 
 namespace metaspace {
 
-DEBUG_ONLY(internal_statistics_t g_internal_statistics;)
-
 // Print a size, in words, scaled.
 void print_scaled_words(outputStream* st, size_t word_size, size_t scale, int width) {
   print_human_readable_size(st, word_size * sizeof(MetaWord), scale, width);
@@ -47,6 +46,19 @@
   st->print(")");
 }
 
+static const char* display_unit_for_scale(size_t scale) {
+  const char* s = NULL;
+  switch(scale) {
+    case 1: s = "bytes"; break;
+    case BytesPerWord: s = "words"; break;
+    case K: s = "KB"; break;
+    case M: s = "MB"; break;
+    case G: s = "GB"; break;
+    default:
+      ShouldNotReachHere();
+  }
+  return s;
+}
 
 // Print a human readable size.
 // byte_size: size, in bytes, to be printed.
@@ -74,36 +86,45 @@
   }
 
 #ifdef ASSERT
-  assert(scale == 1 || scale == BytesPerWord || scale == K || scale == M || scale == G, "Invalid scale");
+  assert(scale == 1 || scale == BytesPerWord ||
+         scale == K || scale == M || scale == G, "Invalid scale");
   // Special case: printing wordsize should only be done with word-sized values
   if (scale == BytesPerWord) {
     assert(byte_size % BytesPerWord == 0, "not word sized");
   }
 #endif
 
-  if (scale == 1) {
-    st->print("%*" PRIuPTR " bytes", width, byte_size);
-  } else if (scale == BytesPerWord) {
-    st->print("%*" PRIuPTR " words", width, byte_size / BytesPerWord);
+  if (width == -1) {
+    if (scale == 1) {
+      st->print(SIZE_FORMAT " bytes", byte_size);
+    } else if (scale == BytesPerWord) {
+      st->print(SIZE_FORMAT " words", byte_size / BytesPerWord);
+    } else {
+      const char* display_unit = display_unit_for_scale(scale);
+      float display_value = (float) byte_size / scale;
+      // Prevent very small but non-null values showing up as 0.00.
+      if (byte_size > 0 && display_value < 0.01f) {
+        st->print("<0.01 %s", display_unit);
+      } else {
+        st->print("%.2f %s", display_value, display_unit);
+      }
+    }
   } else {
-    const char* display_unit = "";
-    switch(scale) {
-      case 1: display_unit = "bytes"; break;
-      case BytesPerWord: display_unit = "words"; break;
-      case K: display_unit = "KB"; break;
-      case M: display_unit = "MB"; break;
-      case G: display_unit = "GB"; break;
-      default:
-        ShouldNotReachHere();
-    }
-    float display_value = (float) byte_size / scale;
-    // Since we use width to display a number with two trailing digits, increase it a bit.
-    width += 3;
-    // Prevent very small but non-null values showing up as 0.00.
-    if (byte_size > 0 && display_value < 0.01f) {
-      st->print("%*s %s", width, "<0.01", display_unit);
+    if (scale == 1) {
+      st->print("%*" PRIuPTR " bytes", width, byte_size);
+    } else if (scale == BytesPerWord) {
+      st->print("%*" PRIuPTR " words", width, byte_size / BytesPerWord);
     } else {
-      st->print("%*.2f %s", width, display_value, display_unit);
+      const char* display_unit = display_unit_for_scale(scale);
+      float display_value = (float) byte_size / scale;
+      // Since we use width to display a number with two trailing digits, increase it a bit.
+      width += 3;
+      // Prevent very small but non-null values showing up as 0.00.
+      if (byte_size > 0 && display_value < 0.01f) {
+        st->print("%*s %s", width, "<0.01", display_unit);
+      } else {
+        st->print("%*.2f %s", width, display_value, display_unit);
+      }
     }
   }
 }
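+
+// For instance (illustrative): with the default width of -1,
+// print_human_readable_size(st, 3670016, M) prints "3.50 MB", while a non-zero
+// byte size below 0.01 of the chosen scale prints as "<0.01 MB".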
@@ -130,70 +151,6 @@
   }
 }
 
-// Returns size of this chunk type.
-size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
-  assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
-  size_t size = 0;
-  if (is_class) {
-    switch(chunktype) {
-      case SpecializedIndex: size = ClassSpecializedChunk; break;
-      case SmallIndex: size = ClassSmallChunk; break;
-      case MediumIndex: size = ClassMediumChunk; break;
-      default:
-        ShouldNotReachHere();
-    }
-  } else {
-    switch(chunktype) {
-      case SpecializedIndex: size = SpecializedChunk; break;
-      case SmallIndex: size = SmallChunk; break;
-      case MediumIndex: size = MediumChunk; break;
-      default:
-        ShouldNotReachHere();
-    }
-  }
-  return size;
-}
-
-ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
-  if (is_class) {
-    if (size == ClassSpecializedChunk) {
-      return SpecializedIndex;
-    } else if (size == ClassSmallChunk) {
-      return SmallIndex;
-    } else if (size == ClassMediumChunk) {
-      return MediumIndex;
-    } else if (size > ClassMediumChunk) {
-      // A valid humongous chunk size is a multiple of the smallest chunk size.
-      assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
-      return HumongousIndex;
-    }
-  } else {
-    if (size == SpecializedChunk) {
-      return SpecializedIndex;
-    } else if (size == SmallChunk) {
-      return SmallIndex;
-    } else if (size == MediumChunk) {
-      return MediumIndex;
-    } else if (size > MediumChunk) {
-      // A valid humongous chunk size is a multiple of the smallest chunk size.
-      assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
-      return HumongousIndex;
-    }
-  }
-  ShouldNotReachHere();
-  return (ChunkIndex)-1;
-}
-
-ChunkIndex next_chunk_index(ChunkIndex i) {
-  assert(i < NumberOfInUseLists, "Out of bound");
-  return (ChunkIndex) (i+1);
-}
-
-ChunkIndex prev_chunk_index(ChunkIndex i) {
-  assert(i > ZeroIndex, "Out of bound");
-  return (ChunkIndex) (i-1);
-}
-
 const char* loaders_plural(uintx num) {
   return num == 1 ? "loader" : "loaders";
 }
--- a/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -33,14 +34,6 @@
 
 namespace metaspace {
 
-enum ChunkSizes {    // in words.
-  ClassSpecializedChunk = 128,
-  SpecializedChunk = 128,
-  ClassSmallChunk = 256,
-  SmallChunk = 512,
-  ClassMediumChunk = 4 * K,
-  MediumChunk = 8 * K
-};
 
 // Print a size, in words, scaled.
 void print_scaled_words(outputStream* st, size_t word_size, size_t scale = 0, int width = -1);
@@ -65,80 +58,6 @@
          SIZE_FORMAT_HEX " is not aligned to "               \
          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 
-// Internal statistics.
-#ifdef ASSERT
-struct  internal_statistics_t {
-  // Number of allocations.
-  uintx num_allocs;
-  // Number of times a ClassLoaderMetaspace was born...
-  uintx num_metaspace_births;
-  // ... and died.
-  uintx num_metaspace_deaths;
-  // Number of times VirtualSpaceListNodes were created...
-  uintx num_vsnodes_created;
-  // ... and purged.
-  uintx num_vsnodes_purged;
-  // Number of times we expanded the committed section of the space.
-  uintx num_committed_space_expanded;
-  // Number of deallocations
-  uintx num_deallocs;
-  // Number of deallocations triggered from outside ("real" deallocations).
-  uintx num_external_deallocs;
-  // Number of times an allocation was satisfied from deallocated blocks.
-  uintx num_allocs_from_deallocated_blocks;
-  // Number of times a chunk was added to the freelist
-  uintx num_chunks_added_to_freelist;
-  // Number of times a chunk was removed from the freelist
-  uintx num_chunks_removed_from_freelist;
-  // Number of chunk merges
-  uintx num_chunk_merges;
-  // Number of chunk splits
-  uintx num_chunk_splits;
-};
-extern internal_statistics_t g_internal_statistics;
-#endif
-
-// ChunkIndex defines the type of chunk.
-// Chunk types differ by size: specialized < small < medium, chunks
-// larger than medium are humongous chunks of varying size.
-enum ChunkIndex {
-  ZeroIndex = 0,
-  SpecializedIndex = ZeroIndex,
-  SmallIndex = SpecializedIndex + 1,
-  MediumIndex = SmallIndex + 1,
-  HumongousIndex = MediumIndex + 1,
-  NumberOfFreeLists = 3,
-  NumberOfInUseLists = 4
-};
-
-// Utility functions.
-size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunk_type, bool is_class);
-ChunkIndex get_chunk_type_by_size(size_t size, bool is_class);
-
-ChunkIndex next_chunk_index(ChunkIndex i);
-ChunkIndex prev_chunk_index(ChunkIndex i);
-// Returns a descriptive name for a chunk type.
-const char* chunk_size_name(ChunkIndex index);
-
-// Verify chunk sizes.
-inline bool is_valid_chunksize(bool is_class, size_t size) {
-  const size_t reasonable_maximum_humongous_chunk_size = 1 * G;
-  return is_aligned(size, sizeof(MetaWord)) &&
-         size < reasonable_maximum_humongous_chunk_size &&
-         is_class ?
-             (size == ClassSpecializedChunk || size == ClassSmallChunk || size >= ClassMediumChunk) :
-             (size == SpecializedChunk || size == SmallChunk || size >= MediumChunk);
-}
-
-// Verify chunk type.
-inline bool is_valid_chunktype(ChunkIndex index) {
-  return index == SpecializedIndex || index == SmallIndex ||
-         index == MediumIndex || index == HumongousIndex;
-}
-
-inline bool is_valid_nonhumongous_chunktype(ChunkIndex index) {
-  return is_valid_chunktype(index) && index != HumongousIndex;
-}
 
 // Pretty printing helpers
 const char* classes_plural(uintx num);
--- a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  *
  */
 #include "precompiled.hpp"
-#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceReport.hpp"
 #include "memory/metaspace/metaspaceDCmd.hpp"
 #include "memory/resourceArea.hpp"
 #include "services/diagnosticCommand.hpp"
@@ -89,12 +89,12 @@
   } else {
     // Full mode. Requires safepoint.
     int flags = 0;
-    if (_show_loaders.value())         flags |= MetaspaceUtils::rf_show_loaders;
-    if (_show_classes.value())         flags |= MetaspaceUtils::rf_show_classes;
-    if (_by_chunktype.value())         flags |= MetaspaceUtils::rf_break_down_by_chunktype;
-    if (_by_spacetype.value())         flags |= MetaspaceUtils::rf_break_down_by_spacetype;
-    if (_show_vslist.value())          flags |= MetaspaceUtils::rf_show_vslist;
-    if (_show_vsmap.value())           flags |= MetaspaceUtils::rf_show_vsmap;
+    if (_show_loaders.value())         flags |= MetaspaceReporter::rf_show_loaders;
+    if (_show_classes.value())         flags |= MetaspaceReporter::rf_show_classes;
+    if (_by_chunktype.value())         flags |= MetaspaceReporter::rf_break_down_by_chunktype;
+    if (_by_spacetype.value())         flags |= MetaspaceReporter::rf_break_down_by_spacetype;
+    if (_show_vslist.value())          flags |= MetaspaceReporter::rf_show_vslist;
+    if (_show_vsmap.value())           flags |= MetaspaceReporter::rf_show_vsmap;
     VM_PrintMetadata op(output(), scale, flags);
     VMThread::execute(&op);
   }
--- a/src/hotspot/share/memory/metaspace/metaspaceDCmd.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceEnums.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+#include "utilities/debug.hpp"
+
+namespace metaspace {
+
+const char* describe_spacetype(MetaspaceType st) {
+  const char* s = NULL;
+  switch (st) {
+    case StandardMetaspaceType: s = "Standard"; break;
+    case BootMetaspaceType: s = "Boot"; break;
+    case UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
+    case ReflectionMetaspaceType: s = "Reflection"; break;
+    default: ShouldNotReachHere();
+  }
+  return s;
+}
+
+const char* describe_mdtype(MetadataType md) {
+  const char* s = NULL;
+  switch (md) {
+    case NonClassType: s = "nonclass"; break;
+    case ClassType: s = "class"; break;
+    default: ShouldNotReachHere();
+  }
+  return s;
+}
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceEnums.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_MEMORY_METASPACE_METASPACEENUMS_HPP
+#define SHARE_MEMORY_METASPACE_METASPACEENUMS_HPP
+
+#include "utilities/debug.hpp"
+
+// MetadataType and MetaspaceType, as well as some convenience functions surrounding them.
+namespace metaspace {
+
+///////////////////////
+
+enum MetadataType {
+  ClassType,
+  NonClassType,
+  MetadataTypeCount
+};
+
+inline bool is_class(MetadataType md) { return md == ClassType; }
+
+inline MetadataType mdtype_from_bool(bool is_class) { return is_class ? ClassType : NonClassType; }
+
+const char* describe_mdtype(MetadataType md);
+
+#ifdef ASSERT
+inline bool is_valid_mdtype(MetadataType md) {
+  return (int)md >= 0 && (int)md < MetadataTypeCount;
+}
+inline void check_valid_mdtype(MetadataType md) {
+  assert(is_valid_mdtype(md), "Wrong value for MetadataType: %d", (int) md);
+}
+#endif // ASSERT
+
+///////////////////////
+
+enum MetaspaceType {
+  ZeroMetaspaceType = 0,
+  StandardMetaspaceType = ZeroMetaspaceType,
+  BootMetaspaceType = StandardMetaspaceType + 1,
+  UnsafeAnonymousMetaspaceType = BootMetaspaceType + 1,
+  ReflectionMetaspaceType = UnsafeAnonymousMetaspaceType + 1,
+  MetaspaceTypeCount
+};
+
+const char* describe_spacetype(MetaspaceType st);
+
+#ifdef ASSERT
+inline bool is_valid_spacetype(MetaspaceType st) {
+  return (int)st >= 0 && (int)st < MetaspaceTypeCount;
+}
+inline void check_valid_spacetype(MetaspaceType st) {
+  assert(is_valid_spacetype(st), "Wrong value for MetaspaceType: %d", (int) st);
+}
+#endif
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_METASPACEENUMS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceReport.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2018, 2019 SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
+#include "memory/metaspace/chunkHeaderPool.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/internStat.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
+#include "memory/metaspace/metaspaceReport.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
+#include "memory/metaspace/runningCounters.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
+#include "memory/metaspace.hpp"
+#include "runtime/os.hpp"
+
+namespace metaspace {
+
+static void print_vs(outputStream* out, size_t scale) {
+
+  const size_t reserved_nc = RunningCounters::reserved_words_nonclass();
+  const size_t committed_nc = RunningCounters::committed_words_nonclass();
+  const int num_nodes_nc = VirtualSpaceList::vslist_nonclass()->num_nodes();
+  const size_t reserved_c = RunningCounters::reserved_words_class();
+  const size_t committed_c = RunningCounters::committed_words_class();
+  const int num_nodes_c = VirtualSpaceList::vslist_class()->num_nodes();
+
+  if (Metaspace::using_class_space()) {
+
+    out->print("  Non-class space:  ");
+    print_scaled_words(out, reserved_nc, scale, 7);
+    out->print(" reserved, ");
+    print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7);
+    out->print(" committed, ");
+    out->print(" %d nodes.", num_nodes_nc);
+    out->cr();
+    out->print("      Class space:  ");
+    print_scaled_words(out, reserved_c, scale, 7);
+    out->print(" reserved, ");
+    print_scaled_words_and_percentage(out, committed_c, reserved_c, scale, 7);
+    out->print(" committed, ");
+    out->print(" %d nodes.", num_nodes_c);
+    out->cr();
+    out->print("              Both:  ");
+    print_scaled_words(out, reserved_c + reserved_nc, scale, 7);
+    out->print(" reserved, ");
+    print_scaled_words_and_percentage(out, committed_c + committed_nc, reserved_c + reserved_nc, scale, 7);
+    out->print(" committed. ");
+    out->cr();
+
+  } else {
+    assert(committed_c == 0 && reserved_c == 0 && num_nodes_c == 0, "Sanity");
+    print_scaled_words(out, reserved_nc, scale, 7);
+    out->print(" reserved, ");
+    print_scaled_words_and_percentage(out, committed_nc, reserved_nc, scale, 7);
+    out->print(" committed, ");
+    out->print(" %d nodes.", num_nodes_nc);
+    out->cr();
+  }
+}
+
+static void print_settings(outputStream* out, size_t scale) {
+  out->print("MaxMetaspaceSize: ");
+  if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
+    // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
+    // value is smaller.
+    out->print("unlimited");
+  } else {
+    print_human_readable_size(out, MaxMetaspaceSize, scale);
+  }
+  out->cr();
+  if (Metaspace::using_class_space()) {
+    out->print("CompressedClassSpaceSize: ");
+    print_human_readable_size(out, CompressedClassSpaceSize, scale);
+    out->cr();
+  }
+  out->print("InitialBootClassLoaderMetaspaceSize: ");
+  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
+  out->cr();
+  Settings::print_on(out);
+}
+
+// Prints a basic metaspace usage report. Unlike print_report(), this is
+// guaranteed neither to lock nor to walk the CLDG.
+void MetaspaceReporter::print_basic_report(outputStream* out, size_t scale) {
+
+  if (!Metaspace::initialized()) {
+    out->print_cr("Metaspace not yet initialized.");
+    return;
+  }
+
+  out->cr();
+  out->print_cr("Usage:");
+
+  if (Metaspace::using_class_space()) {
+    out->print("  Non-class:  ");
+  }
+
+  // Note: since we want to rely purely on counters, without any locking or
+  // walking of the CLDG, the only usage statistic (over in-use chunks) we can
+  // print is the used words. Committed, free and waste areas of in-use chunks
+  // would require walking.
+  const size_t used_nc = MetaspaceUtils::used_words(metaspace::NonClassType);
+
+  print_scaled_words(out, used_nc, scale, 5);
+  out->print(" used.");
+  out->cr();
+
+  if (Metaspace::using_class_space()) {
+    const size_t used_c = MetaspaceUtils::used_words(metaspace::ClassType);
+    out->print("      Class:  ");
+    print_scaled_words(out, used_c, scale, 5);
+    out->print(" used.");
+    out->cr();
+
+    out->print("       Both:  ");
+    const size_t used = used_nc + used_c;
+    print_scaled_words(out, used, scale, 5);
+    out->print(" used.");
+    out->cr();
+  }
+
+  out->cr();
+  out->print_cr("Virtual space:");
+
+  print_vs(out, scale);
+
+  out->cr();
+  out->print_cr("Chunk freelists:");
+
+  if (Metaspace::using_class_space()) {
+    out->print("   Non-Class:  ");
+  }
+  print_scaled_words(out, ChunkManager::chunkmanager_nonclass()->total_word_size(), scale);
+  out->cr();
+  if (Metaspace::using_class_space()) {
+    out->print("       Class:  ");
+    print_scaled_words(out, ChunkManager::chunkmanager_class()->total_word_size(), scale);
+    out->cr();
+    out->print("        Both:  ");
+    print_scaled_words(out, ChunkManager::chunkmanager_nonclass()->total_word_size() +
+                            ChunkManager::chunkmanager_class()->total_word_size(), scale);
+    out->cr();
+  }
+
+  out->cr();
+
+  // Print basic settings
+  print_settings(out, scale);
+
+  out->cr();
+
+#ifdef ASSERT
+  out->cr();
+  out->print_cr("Internal statistics:");
+  out->cr();
+  InternalStats::print_on(out);
+  out->cr();
+#endif
+}
+
+void MetaspaceReporter::print_report(outputStream* out, size_t scale, int flags) {
+
+  if (!Metaspace::initialized()) {
+    out->print_cr("Metaspace not yet initialized.");
+    return;
+  }
+
+  const bool print_loaders = (flags & rf_show_loaders) > 0;
+  const bool print_classes = (flags & rf_show_classes) > 0;
+  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
+  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
+
+  // Some report options require walking the class loader data graph.
+  metaspace::PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
+  if (print_loaders) {
+    out->cr();
+    out->print_cr("Usage per loader:");
+    out->cr();
+  }
+
+  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print
+
+  // Print totals, broken up by space type.
+  if (print_by_spacetype) {
+    out->cr();
+    out->print_cr("Usage per space type:");
+    out->cr();
+    for (int space_type = (int)metaspace::ZeroMetaspaceType;
+         space_type < (int)metaspace::MetaspaceTypeCount; space_type ++)
+    {
+      uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
+      uintx num_classes = cl._num_classes_by_spacetype[space_type];
+      out->print("%s - " UINTX_FORMAT " %s",
+        describe_spacetype((MetaspaceType)space_type),
+        num_loaders, loaders_plural(num_loaders));
+      if (num_classes > 0) {
+        out->print(", ");
+
+        print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
+        out->print(":");
+        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
+      } else {
+        out->print(".");
+        out->cr();
+      }
+      out->cr();
+    }
+  }
+
+  // Print totals for in-use data:
+  out->cr();
+  {
+    uintx num_loaders = cl._num_loaders;
+    out->print("Total Usage - " UINTX_FORMAT " %s, ",
+      num_loaders, loaders_plural(num_loaders));
+    print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
+    out->print(":");
+    cl._stats_total.print_on(out, scale, print_by_chunktype);
+    out->cr();
+  }
+
+  /////////////////////////////////////////////////
+  // -- Print Virtual space.
+  out->cr();
+  out->print_cr("Virtual space:");
+
+  print_vs(out, scale);
+
+  // -- Print VirtualSpaceList details.
+  if ((flags & rf_show_vslist) > 0) {
+    out->cr();
+    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
+
+    if (Metaspace::using_class_space()) {
+      out->print_cr("   Non-Class:");
+    }
+    VirtualSpaceList::vslist_nonclass()->print_on(out);
+    out->cr();
+    if (Metaspace::using_class_space()) {
+      out->print_cr("       Class:");
+      VirtualSpaceList::vslist_class()->print_on(out);
+      out->cr();
+    }
+  }
+  out->cr();
+
+  // -- Print VirtualSpaceList map.
+/* Deactivated for now.
+  if ((flags & rf_show_vsmap) > 0) {
+    out->cr();
+    out->print_cr("Virtual space map:");
+
+    if (Metaspace::using_class_space()) {
+      out->print_cr("   Non-Class:");
+    }
+    Metaspace::space_list()->print_map(out);
+    if (Metaspace::using_class_space()) {
+      out->print_cr("       Class:");
+      Metaspace::class_space_list()->print_map(out);
+    }
+  }
+  out->cr();
+*/
+
+  //////////// Freelists (ChunkManager) section ///////////////////////////
+
+  out->cr();
+  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
+
+  cm_stats_t non_class_cm_stat;
+  cm_stats_t class_cm_stat;
+  cm_stats_t total_cm_stat;
+
+  ChunkManager::chunkmanager_nonclass()->add_to_statistics(&non_class_cm_stat);
+  if (Metaspace::using_class_space()) {
+    ChunkManager::chunkmanager_class()->add_to_statistics(&class_cm_stat);
+    total_cm_stat.add(non_class_cm_stat);
+    total_cm_stat.add(class_cm_stat);
+
+    out->print_cr("   Non-Class:");
+    non_class_cm_stat.print_on(out, scale);
+    out->cr();
+    out->print_cr("       Class:");
+    class_cm_stat.print_on(out, scale);
+    out->cr();
+    out->print_cr("        Both:");
+    total_cm_stat.print_on(out, scale);
+    out->cr();
+  } else {
+    total_cm_stat.add(non_class_cm_stat);
+    non_class_cm_stat.print_on(out, scale);
+    out->cr();
+  }
+
+  //////////// Waste section ///////////////////////////
+  // As a convenience, print a summary of common waste.
+  out->cr();
+  out->print("Waste (unused committed space):");
+  // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
+  const size_t committed_words = RunningCounters::committed_words();
+
+  out->print("(percentages refer to total committed size ");
+  print_scaled_words(out, committed_words, scale);
+  out->print_cr("):");
+
+  // Print waste for in-use chunks.
+  in_use_chunk_stats_t ucs_nonclass = cl._stats_total.sm_stats_nonclass.totals();
+  in_use_chunk_stats_t ucs_class = cl._stats_total.sm_stats_class.totals();
+  const size_t waste_in_chunks_in_use = ucs_nonclass.waste_words + ucs_class.waste_words;
+  const size_t free_in_chunks_in_use = ucs_nonclass.free_words + ucs_class.free_words;
+
+  out->print("        Waste in chunks in use: ");
+  print_scaled_words_and_percentage(out, waste_in_chunks_in_use, committed_words, scale, 6);
+  out->cr();
+  out->print("        Free in chunks in use: ");
+  print_scaled_words_and_percentage(out, free_in_chunks_in_use, committed_words, scale, 6);
+  out->cr();
+
+  // Print waste in free chunks.
+  const size_t committed_in_free_chunks = total_cm_stat.total_committed_word_size();
+  out->print("                In free chunks: ");
+  print_scaled_words_and_percentage(out, committed_in_free_chunks, committed_words, scale, 6);
+  out->cr();
+
+  // Print waste in deallocated blocks.
+  const uintx free_blocks_num =
+      cl._stats_total.sm_stats_nonclass.free_blocks_num +
+      cl._stats_total.sm_stats_class.free_blocks_num;
+  const size_t free_blocks_cap_words =
+      cl._stats_total.sm_stats_nonclass.free_blocks_word_size +
+      cl._stats_total.sm_stats_class.free_blocks_word_size;
+  out->print("Deallocated from chunks in use: ");
+  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
+  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
+  out->cr();
+
+  // Print total waste.
+  const size_t total_waste =
+      waste_in_chunks_in_use +
+      free_in_chunks_in_use +
+      committed_in_free_chunks +
+      free_blocks_cap_words;
+  out->print("                       -total-: ");
+  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
+  out->cr();
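+  // (Illustrative arithmetic: had the four categories above been, say, 1M,
+  //  2M, 3M and 0.5M words, the -total- line would show their 6.5M-word sum
+  //  as a percentage of the committed size.)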
+
+  // Also print chunk header pool size.
+  out->cr();
+  out->print("chunk header pool: %u items, ", ChunkHeaderPool::pool().used());
+  print_scaled_words(out, ChunkHeaderPool::pool().memory_footprint_words(), scale);
+  out->print(".");
+  out->cr();
+
+  // Print internal statistics
+#ifdef ASSERT
+  out->cr();
+  out->print_cr("Internal statistics:");
+  out->cr();
+  InternalStats::print_on(out);
+  out->cr();
+#endif
+
+  // Print some interesting settings
+  out->cr();
+  out->print_cr("Settings:");
+  print_settings(out, scale);
+
+  out->cr();
+  out->cr();
+
+} // MetaspaceReporter::print_report()
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceReport.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, 2019 SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP
+#define SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP
+
+#include "memory/allocation.hpp"
+
+namespace metaspace {
+
+class MetaspaceReporter : public AllStatic {
+public:
+
+  // Flags for print_report().
+  enum ReportFlag {
+    // Show usage by class loader.
+    rf_show_loaders                 = (1 << 0),
+    // Breaks report down by chunk type (small, medium, ...).
+    rf_break_down_by_chunktype      = (1 << 1),
+    // Breaks report down by space type (anonymous, reflection, ...).
+    rf_break_down_by_spacetype      = (1 << 2),
+    // Print details about the underlying virtual spaces.
+    rf_show_vslist                  = (1 << 3),
+    // Print metaspace map.
+    rf_show_vsmap                   = (1 << 4),
+    // If show_loaders: show loaded classes for each loader.
+    rf_show_classes                 = (1 << 5)
+  };
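+  // These flags can be OR-ed together; e.g. a (hypothetical) caller wanting
+  // per-loader usage including the loaded classes would pass
+  // rf_show_loaders | rf_show_classes.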
+
+  // Prints a basic metaspace usage report. Unlike print_report(), this is
+  // guaranteed neither to lock nor to walk the CLDG.
+  static void print_basic_report(outputStream* st, size_t scale);
+
+  // Prints a report about the current metaspace state.
+  // Optional parts can be enabled via flags.
+  // Function will walk the CLDG and will lock the expand lock; if that is not
+  // convenient, use print_basic_report() instead.
+  static void print_report(outputStream* out, size_t scale = 0, int flags = 0);
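+  // A minimal usage sketch (illustrative only; "st" stands for whatever
+  // outputStream the caller owns):
+  //   MetaspaceReporter::print_report(st, K, rf_show_loaders | rf_show_vslist);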
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP
--- a/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 
 #include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 
 namespace metaspace {
@@ -33,9 +34,9 @@
 MetaspaceSizesSnapshot::MetaspaceSizesSnapshot()
     : _used(MetaspaceUtils::used_bytes()),
       _committed(MetaspaceUtils::committed_bytes()),
-      _non_class_used(MetaspaceUtils::used_bytes(Metaspace::NonClassType)),
-      _non_class_committed(MetaspaceUtils::committed_bytes(Metaspace::NonClassType)),
-      _class_used(MetaspaceUtils::used_bytes(Metaspace::ClassType)),
-      _class_committed(MetaspaceUtils::committed_bytes(Metaspace::ClassType)) { }
+      _non_class_used(MetaspaceUtils::used_bytes(metaspace::NonClassType)),
+      _non_class_committed(MetaspaceUtils::committed_bytes(metaspace::NonClassType)),
+      _class_used(MetaspaceUtils::used_bytes(metaspace::ClassType)),
+      _class_committed(MetaspaceUtils::committed_bytes(metaspace::ClassType)) { }
 
 } // namespace metaspace
--- a/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,177 +24,158 @@
  */
 #include "precompiled.hpp"
 
-#include "memory/metaspace/metachunk.hpp"
+
+#include "memory/metaspace/chunkLevel.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/metaspaceStatistics.hpp"
+
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
 
 namespace metaspace {
 
-// FreeChunksStatistics methods
-
-FreeChunksStatistics::FreeChunksStatistics()
-: _num(0), _cap(0)
-{}
-
-void FreeChunksStatistics::reset() {
-  _num = 0; _cap = 0;
-}
-
-void FreeChunksStatistics::add(uintx n, size_t s) {
-  _num += n; _cap += s;
-}
-
-void FreeChunksStatistics::add(const FreeChunksStatistics& other) {
-  _num += other._num;
-  _cap += other._cap;
-}
-
-void FreeChunksStatistics::print_on(outputStream* st, size_t scale) const {
-  st->print(UINTX_FORMAT, _num);
-  st->print(" chunks, total capacity ");
-  print_scaled_words(st, _cap, scale);
-}
-
 // ChunkManagerStatistics methods
 
-void ChunkManagerStatistics::reset() {
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    _chunk_stats[i].reset();
+// Adds the counters of another cm_stats_t to this one.
+void cm_stats_t::add(const cm_stats_t& other) {
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    num_chunks[l] += other.num_chunks[l];
+    committed_word_size[l] += other.committed_word_size[l];
   }
 }
 
-size_t ChunkManagerStatistics::total_capacity() const {
-  return _chunk_stats[SpecializedIndex].cap() +
-      _chunk_stats[SmallIndex].cap() +
-      _chunk_stats[MediumIndex].cap() +
-      _chunk_stats[HumongousIndex].cap();
+// Returns total word size of all chunks in this manager.
+size_t cm_stats_t::total_word_size() const {
+  size_t s = 0;
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    s += num_chunks[l] * chklvl::word_size_for_level(l);
+  }
+  return s;
+}
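+// (Illustrative: the chunk word size is implied by the level, so e.g. three
+// chunks at a level with a hypothetical chunk size of 64K words contribute
+// 3 * 64K = 192K words to the total.)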
+
+// Returns total committed word size of all chunks in this manager.
+size_t cm_stats_t::total_committed_word_size() const {
+  size_t s = 0;
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    s += committed_word_size[l];
+  }
+  return s;
 }
 
-void ChunkManagerStatistics::print_on(outputStream* st, size_t scale) const {
-  FreeChunksStatistics totals;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+
+void cm_stats_t::print_on(outputStream* st, size_t scale) const {
+  // Note: printed as part of the metaspace report, so formatting matters.
+  size_t total_size = 0;
+  size_t total_committed_size = 0;
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
     st->cr();
-    st->print("%12s chunks: ", chunk_size_name(i));
-    if (_chunk_stats[i].num() > 0) {
-      st->print(UINTX_FORMAT_W(4) ", capacity ", _chunk_stats[i].num());
-      print_scaled_words(st, _chunk_stats[i].cap(), scale);
+    chklvl::print_chunk_size(st, l);
+    st->print(": ");
+    if (num_chunks[l] > 0) {
+      const size_t word_size = num_chunks[l] * chklvl::word_size_for_level(l);
+
+      st->print("%4d, capacity=", num_chunks[l]);
+      print_scaled_words(st, word_size, scale);
+
+      st->print(", committed=");
+      print_scaled_words_and_percentage(st, committed_word_size[l], word_size, scale);
+
+      total_size += word_size;
+      total_committed_size += committed_word_size[l];
     } else {
       st->print("(none)");
     }
-    totals.add(_chunk_stats[i]);
   }
   st->cr();
-  st->print("%19s: " UINTX_FORMAT_W(4) ", capacity=", "Total", totals.num());
-  print_scaled_words(st, totals.cap(), scale);
+  st->print("Total word size: ");
+  print_scaled_words(st, total_size, scale);
+  st->print(", committed: ");
+  print_scaled_words_and_percentage(st, total_committed_size, total_size, scale);
   st->cr();
 }
 
+#ifdef ASSERT
+void cm_stats_t::verify() const {
+  assert(total_committed_word_size() <= total_word_size(),
+         "Sanity");
+}
+#endif
+
 // UsedChunksStatistics methods
 
-UsedChunksStatistics::UsedChunksStatistics()
-: _num(0), _cap(0), _used(0), _free(0), _waste(0), _overhead(0)
-{}
-
-void UsedChunksStatistics::reset() {
-  _num = 0;
-  _cap = _overhead = _used = _free = _waste = 0;
-}
-
-void UsedChunksStatistics::add(const UsedChunksStatistics& other) {
-  _num += other._num;
-  _cap += other._cap;
-  _used += other._used;
-  _free += other._free;
-  _waste += other._waste;
-  _overhead += other._overhead;
-  DEBUG_ONLY(check_sanity());
-}
-
-void UsedChunksStatistics::print_on(outputStream* st, size_t scale) const {
+void in_use_chunk_stats_t::print_on(outputStream* st, size_t scale) const {
   int col = st->position();
-  st->print(UINTX_FORMAT_W(4) " chunk%s, ", _num, _num != 1 ? "s" : "");
-  if (_num > 0) {
+  st->print("%4d chunk%s, ", num, num != 1 ? "s" : "");
+  if (num > 0) {
     col += 14; st->fill_to(col);
 
-    print_scaled_words(st, _cap, scale, 5);
-    st->print(" capacity, ");
+    print_scaled_words(st, word_size, scale, 5);
+    st->print(" capacity,");
+
+    col += 20; st->fill_to(col);
+    print_scaled_words_and_percentage(st, committed_words, word_size, scale, 5);
+    st->print(" committed, ");
 
     col += 18; st->fill_to(col);
-    print_scaled_words_and_percentage(st, _used, _cap, scale, 5);
+    print_scaled_words_and_percentage(st, used_words, word_size, scale, 5);
     st->print(" used, ");
 
     col += 20; st->fill_to(col);
-    print_scaled_words_and_percentage(st, _free, _cap, scale, 5);
+    print_scaled_words_and_percentage(st, free_words, word_size, scale, 5);
     st->print(" free, ");
 
     col += 20; st->fill_to(col);
-    print_scaled_words_and_percentage(st, _waste, _cap, scale, 5);
-    st->print(" waste, ");
+    print_scaled_words_and_percentage(st, waste_words, word_size, scale, 5);
+    st->print(" waste ");
 
-    col += 20; st->fill_to(col);
-    print_scaled_words_and_percentage(st, _overhead, _cap, scale, 5);
-    st->print(" overhead");
   }
-  DEBUG_ONLY(check_sanity());
 }
 
 #ifdef ASSERT
-void UsedChunksStatistics::check_sanity() const {
-  assert(_overhead == (Metachunk::overhead() * _num), "Sanity: Overhead.");
-  assert(_cap == _used + _free + _waste + _overhead, "Sanity: Capacity.");
+void in_use_chunk_stats_t::verify() const {
+  assert(word_size >= committed_words &&
+      committed_words == used_words + free_words + waste_words,
+         "Sanity: cap " SIZE_FORMAT ", committed " SIZE_FORMAT ", used " SIZE_FORMAT ", free " SIZE_FORMAT ", waste " SIZE_FORMAT ".",
+         word_size, committed_words, used_words, free_words, waste_words);
 }
 #endif
 
 // SpaceManagerStatistics methods
 
-SpaceManagerStatistics::SpaceManagerStatistics() { reset(); }
-
-void SpaceManagerStatistics::reset() {
-  for (int i = 0; i < NumberOfInUseLists; i ++) {
-    _chunk_stats[i].reset();
-    _free_blocks_num = 0; _free_blocks_cap_words = 0;
+void sm_stats_t::add(const sm_stats_t& other) {
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    stats[l].add(other.stats[l]);
   }
-}
-
-void SpaceManagerStatistics::add_free_blocks_info(uintx num, size_t cap) {
-  _free_blocks_num += num;
-  _free_blocks_cap_words += cap;
+  free_blocks_num += other.free_blocks_num;
+  free_blocks_word_size += other.free_blocks_word_size;
 }
 
-void SpaceManagerStatistics::add(const SpaceManagerStatistics& other) {
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    _chunk_stats[i].add(other._chunk_stats[i]);
-  }
-  _free_blocks_num += other._free_blocks_num;
-  _free_blocks_cap_words += other._free_blocks_cap_words;
-}
 
 // Returns total chunk statistics over all chunk types.
-UsedChunksStatistics SpaceManagerStatistics::totals() const {
-  UsedChunksStatistics stat;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    stat.add(_chunk_stats[i]);
+in_use_chunk_stats_t sm_stats_t::totals() const {
+  in_use_chunk_stats_t out;
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    out.add(stats[l]);
   }
-  return stat;
+  return out;
 }
 
-void SpaceManagerStatistics::print_on(outputStream* st, size_t scale,  bool detailed) const {
+void sm_stats_t::print_on(outputStream* st, size_t scale, bool detailed) const {
   streamIndentor sti(st);
   if (detailed) {
     st->cr_indent();
-    st->print("Usage by chunk type:");
+    st->print("Usage by chunk level:");
     {
       streamIndentor sti2(st);
-      for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+      for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
         st->cr_indent();
-        st->print("%15s: ", chunk_size_name(i));
-        if (_chunk_stats[i].num() == 0) {
+        chklvl::print_chunk_size(st, l);
+        st->print(" chunks: ");
+        if (stats[l].num == 0) {
           st->print(" (none)");
         } else {
-          _chunk_stats[i].print_on(st, scale);
+          stats[l].print_on(st, scale);
         }
       }
 
@@ -202,61 +183,57 @@
       st->print("%15s: ", "-total-");
       totals().print_on(st, scale);
     }
-    if (_free_blocks_num > 0) {
+    if (free_blocks_num > 0) {
       st->cr_indent();
-      st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num);
-      print_scaled_words(st, _free_blocks_cap_words, scale);
+      st->print("deallocated: " UINTX_FORMAT " blocks with ", free_blocks_num);
+      print_scaled_words(st, free_blocks_word_size, scale);
     }
   } else {
     totals().print_on(st, scale);
     st->print(", ");
-    st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num);
-    print_scaled_words(st, _free_blocks_cap_words, scale);
-  }
-}
-
-// ClassLoaderMetaspaceStatistics methods
-
-ClassLoaderMetaspaceStatistics::ClassLoaderMetaspaceStatistics() { reset(); }
-
-void ClassLoaderMetaspaceStatistics::reset() {
-  nonclass_sm_stats().reset();
-  if (Metaspace::using_class_space()) {
-    class_sm_stats().reset();
+    st->print("deallocated: " UINTX_FORMAT " blocks with ", free_blocks_num);
+    print_scaled_words(st, free_blocks_word_size, scale);
   }
 }
 
+#ifdef ASSERT
+
+void sm_stats_t::verify() const {
+  size_t total_used = 0;
+  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
+    stats[l].verify();
+    total_used += stats[l].used_words;
+  }
+  // Deallocated allocations still count as used
+  assert(total_used >= free_blocks_word_size,
+         "Sanity");
+}
+#endif
+
+// clms_stats_t methods
+
 // Returns total space manager statistics for both class and non-class metaspace
-SpaceManagerStatistics ClassLoaderMetaspaceStatistics::totals() const {
-  SpaceManagerStatistics stats;
-  stats.add(nonclass_sm_stats());
-  if (Metaspace::using_class_space()) {
-    stats.add(class_sm_stats());
-  }
-  return stats;
+sm_stats_t clms_stats_t::totals() const {
+  sm_stats_t out;
+  out.add(sm_stats_nonclass);
+  out.add(sm_stats_class);
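+  // Note: when class space is not used, sm_stats_class stays all-zero, so
+  // adding it unconditionally is harmless.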
+  return out;
 }
 
-void ClassLoaderMetaspaceStatistics::add(const ClassLoaderMetaspaceStatistics& other) {
-  nonclass_sm_stats().add(other.nonclass_sm_stats());
-  if (Metaspace::using_class_space()) {
-    class_sm_stats().add(other.class_sm_stats());
-  }
-}
-
-void ClassLoaderMetaspaceStatistics::print_on(outputStream* st, size_t scale, bool detailed) const {
+void clms_stats_t::print_on(outputStream* st, size_t scale, bool detailed) const {
   streamIndentor sti(st);
   st->cr_indent();
   if (Metaspace::using_class_space()) {
     st->print("Non-Class: ");
   }
-  nonclass_sm_stats().print_on(st, scale, detailed);
+  sm_stats_nonclass.print_on(st, scale, detailed);
   if (detailed) {
     st->cr();
   }
   if (Metaspace::using_class_space()) {
     st->cr_indent();
     st->print("    Class: ");
-    class_sm_stats().print_on(st, scale, detailed);
+    sm_stats_class.print_on(st, scale, detailed);
     if (detailed) {
       st->cr();
     }
@@ -270,6 +247,14 @@
   st->cr();
 }
 
+
+#ifdef ASSERT
+void clms_stats_t::verify() const {
+  sm_stats_nonclass.verify();
+  sm_stats_class.verify();
+}
+#endif
+
 } // end namespace metaspace
 
 
--- a/src/hotspot/share/memory/metaspace/metaspaceStatistics.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,162 +26,135 @@
 #ifndef SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP
 #define SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP
 
+#include "memory/metaspace.hpp"             // for MetadataType enum
+#include "memory/metaspace/chunkLevel.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "memory/metaspace.hpp" // for MetadataType enum
-#include "memory/metaspace/metachunk.hpp" // for ChunkIndex enum
 
 class outputStream;
 
 namespace metaspace {
 
-// Contains statistics for a number of free chunks.
-class FreeChunksStatistics {
-  uintx _num;         // Number of chunks
-  size_t _cap;        // Total capacity, in words
+
+// Contains statistics for one or more ChunkManagers.
+struct cm_stats_t {
 
-public:
-  FreeChunksStatistics();
+  // How many chunks per level are checked in.
+  int num_chunks[chklvl::NUM_CHUNK_LEVELS];
+
+  // Size, in words, of the sum of all committed areas in this chunk manager, per level.
+  size_t committed_word_size[chklvl::NUM_CHUNK_LEVELS];
 
-  void reset();
+  cm_stats_t() : num_chunks(), committed_word_size() {}
+
+  void add(const cm_stats_t& other);
 
-  uintx num() const { return _num; }
-  size_t cap() const { return _cap; }
+  // Returns total word size of all chunks in this manager.
+  size_t total_word_size() const;
 
-  void add(uintx n, size_t s);
-  void add(const FreeChunksStatistics& other);
+  // Returns total committed word size of all chunks in this manager.
+  size_t total_committed_word_size() const;
+
   void print_on(outputStream* st, size_t scale) const;
 
-}; // end: FreeChunksStatistics
-
-
-// Contains statistics for a ChunkManager.
-class ChunkManagerStatistics {
-
-  FreeChunksStatistics _chunk_stats[NumberOfInUseLists];
-
-public:
-
-  // Free chunk statistics, by chunk index.
-  const FreeChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
-  FreeChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
-
-  void reset();
-  size_t total_capacity() const;
-
-  void print_on(outputStream* st, size_t scale) const;
+  DEBUG_ONLY(void verify() const;)
 
-}; // ChunkManagerStatistics
+}; // cm_stats_t
 
-// Contains statistics for a number of chunks in use.
-// Each chunk has a used and free portion; however, there are current chunks (serving
-// potential future metaspace allocations) and non-current chunks. Unused portion of the
-// former is counted as free, unused portion of the latter counts as waste.
-class UsedChunksStatistics {
-  uintx _num;     // Number of chunks
-  size_t _cap;    // Total capacity in words.
-  size_t _used;   // Total used area, in words
-  size_t _free;   // Total free area (unused portions of current chunks), in words
-  size_t _waste;  // Total waste area (unused portions of non-current chunks), in words
-  size_t _overhead; // Total sum of chunk overheads, in words.
+// Contains statistics for one or more chunks in use.
+struct in_use_chunk_stats_t {
+
+  // Number of chunks
+  int num;
+
+  // Note:
+  // capacity = committed + uncommitted
+  //            committed = used + free + waste
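+  // (Illustrative: a chunk of 64K words with 16K committed could have e.g.
+  //  10K used, 5K free and 1K waste, since 16K = 10K + 5K + 1K.)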
 
-public:
-
-  UsedChunksStatistics();
+  // Capacity (total sum of all chunk sizes) in words.
+  // May contain committed and uncommitted space.
+  size_t word_size;
 
-  void reset();
+  // Total committed area, in words.
+  size_t committed_words;
 
-  uintx num() const { return _num; }
+  // Total used area, in words.
+  size_t used_words;
 
-  // Total capacity, in words
-  size_t cap() const { return _cap; }
-
-  // Total used area, in words
-  size_t used() const { return _used; }
+  // Total free committed area, in words.
+  size_t free_words;
 
-  // Total free area (unused portions of current chunks), in words
-  size_t free() const { return _free; }
-
-  // Total waste area (unused portions of non-current chunks), in words
-  size_t waste() const { return _waste; }
+  // Total waste committed area, in words.
+  size_t waste_words;
 
-  // Total area spent in overhead (chunk headers), in words
-  size_t overhead() const { return _overhead; }
+  in_use_chunk_stats_t()
+    : num(0), word_size(0), committed_words(0),
+      used_words(0), free_words(0), waste_words(0)
+  {}
 
-  void add_num(uintx n) { _num += n; }
-  void add_cap(size_t s) { _cap += s; }
-  void add_used(size_t s) { _used += s; }
-  void add_free(size_t s) { _free += s; }
-  void add_waste(size_t s) { _waste += s; }
-  void add_overhead(size_t s) { _overhead += s; }
+  void add(const in_use_chunk_stats_t& other) {
+    num += other.num;
+    word_size += other.word_size;
+    committed_words += other.committed_words;
+    used_words += other.used_words;
+    free_words += other.free_words;
+    waste_words += other.waste_words;
 
-  void add(const UsedChunksStatistics& other);
+  }
 
   void print_on(outputStream* st, size_t scale) const;
 
-#ifdef ASSERT
-  void check_sanity() const;
-#endif
+  DEBUG_ONLY(void verify() const;)
 
-}; // UsedChunksStatistics
+}; // in_use_chunk_stats_t
 
 // Class containing statistics for one or more space managers.
-class SpaceManagerStatistics {
-
-  UsedChunksStatistics _chunk_stats[NumberOfInUseLists];
-  uintx _free_blocks_num;
-  size_t _free_blocks_cap_words;
+struct sm_stats_t {
 
-public:
-
-  SpaceManagerStatistics();
-
-  // Chunk statistics by chunk index
-  const UsedChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
-  UsedChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
+  // chunk statistics by chunk level
+  in_use_chunk_stats_t stats[chklvl::NUM_CHUNK_LEVELS];
+  uintx free_blocks_num;
+  size_t free_blocks_word_size;
 
-  uintx free_blocks_num () const                                    { return _free_blocks_num; }
-  size_t free_blocks_cap_words () const                             { return _free_blocks_cap_words; }
-
-  void reset();
-
-  void add_free_blocks_info(uintx num, size_t cap);
+  sm_stats_t()
+    : stats(),
+      free_blocks_num(0),
+      free_blocks_word_size(0)
+  {}
 
-  // Returns total chunk statistics over all chunk types.
-  UsedChunksStatistics totals() const;
+  void add(const sm_stats_t& other);
+
+  void print_on(outputStream* st, size_t scale = K, bool detailed = true) const;
 
-  void add(const SpaceManagerStatistics& other);
+  in_use_chunk_stats_t totals() const;
 
-  void print_on(outputStream* st, size_t scale,  bool detailed) const;
+  DEBUG_ONLY(void verify() const;)
 
-}; // SpaceManagerStatistics
+}; // sm_stats_t
 
-class ClassLoaderMetaspaceStatistics {
-
-  SpaceManagerStatistics _sm_stats[Metaspace::MetadataTypeCount];
+// Statistics for one or more ClassLoaderMetaspace objects.
+struct clms_stats_t {
 
-public:
-
-  ClassLoaderMetaspaceStatistics();
+  sm_stats_t sm_stats_nonclass;
+  sm_stats_t sm_stats_class;
 
-  const SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType) const { return _sm_stats[mdType]; }
-  SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType)             { return _sm_stats[mdType]; }
+  clms_stats_t() : sm_stats_nonclass(), sm_stats_class() {}
 
-  const SpaceManagerStatistics& nonclass_sm_stats() const { return sm_stats(Metaspace::NonClassType); }
-  SpaceManagerStatistics& nonclass_sm_stats()             { return sm_stats(Metaspace::NonClassType); }
-  const SpaceManagerStatistics& class_sm_stats() const    { return sm_stats(Metaspace::ClassType); }
-  SpaceManagerStatistics& class_sm_stats()                { return sm_stats(Metaspace::ClassType); }
+  void add(const clms_stats_t& other) {
+    sm_stats_nonclass.add(other.sm_stats_nonclass);
+    sm_stats_class.add(other.sm_stats_class);
+  }
 
-  void reset();
-
-  void add(const ClassLoaderMetaspaceStatistics& other);
+  void print_on(outputStream* st, size_t scale, bool detailed) const;
 
   // Returns total space manager statistics for both class and non-class metaspace
-  SpaceManagerStatistics totals() const;
+  sm_stats_t totals() const;
 
-  void print_on(outputStream* st, size_t scale, bool detailed) const;
+
+  DEBUG_ONLY(void verify() const;)
 
-}; // ClassLoaderMetaspaceStatistics
+}; // clms_stats_t
 
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP
+
--- a/src/hotspot/share/memory/metaspace/occupancyMap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/occupancyMap.hpp"
-#include "runtime/os.hpp"
-
-namespace metaspace {
-
-OccupancyMap::OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
-            _reference_address(reference_address), _word_size(word_size),
-            _smallest_chunk_word_size(smallest_chunk_word_size)
-{
-  assert(reference_address != NULL, "invalid reference address");
-  assert(is_aligned(reference_address, smallest_chunk_word_size),
-      "Reference address not aligned to smallest chunk size.");
-  assert(is_aligned(word_size, smallest_chunk_word_size),
-      "Word_size shall be a multiple of the smallest chunk size.");
-  // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
-  size_t num_bits = word_size / smallest_chunk_word_size;
-  _map_size = (num_bits + 7) / 8;
-  assert(_map_size * 8 >= num_bits, "sanity");
-  _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
-  _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
-  assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
-  memset(_map[1], 0, _map_size);
-  memset(_map[0], 0, _map_size);
-  // Sanity test: the first respectively last possible chunk start address in
-  // the covered range shall map to the first and last bit in the bitmap.
-  assert(get_bitpos_for_address(reference_address) == 0,
-      "First chunk address in range must map to fist bit in bitmap.");
-  assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
-      "Last chunk address in range must map to last bit in bitmap.");
-}
-
-OccupancyMap::~OccupancyMap() {
-  os::free(_map[0]);
-  os::free(_map[1]);
-}
-
-#ifdef ASSERT
-// Verify occupancy map for the address range [from, to).
-// We need to tell it the address range, because the memory the
-// occupancy map is covering may not be fully comitted yet.
-void OccupancyMap::verify(MetaWord* from, MetaWord* to) {
-  Metachunk* chunk = NULL;
-  int nth_bit_for_chunk = 0;
-  MetaWord* chunk_end = NULL;
-  for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
-    const unsigned pos = get_bitpos_for_address(p);
-    // Check the chunk-starts-info:
-    if (get_bit_at_position(pos, layer_chunk_start_map)) {
-      // Chunk start marked in bitmap.
-      chunk = (Metachunk*) p;
-      if (chunk_end != NULL) {
-        assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
-            "the next chunk to start at %p).", p, chunk_end);
-      }
-      assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
-      if (chunk->get_chunk_type() != HumongousIndex) {
-        guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
-      }
-      chunk_end = p + chunk->word_size();
-      nth_bit_for_chunk = 0;
-      assert(chunk_end <= to, "Chunk end overlaps test address range.");
-    } else {
-      // No chunk start marked in bitmap.
-      assert(chunk != NULL, "Chunk should start at start of address range.");
-      assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
-      nth_bit_for_chunk ++;
-    }
-    // Check the in-use-info:
-    const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
-    if (in_use_bit) {
-      assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
-          chunk, nth_bit_for_chunk);
-    } else {
-      assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
-          chunk, nth_bit_for_chunk);
-    }
-  }
-}
-
-// Verify that a given chunk is correctly accounted for in the bitmap.
-void OccupancyMap::verify_for_chunk(Metachunk* chunk) {
-  assert(chunk_starts_at_address((MetaWord*) chunk),
-      "No chunk start marked in map for chunk %p.", chunk);
-  // For chunks larger than the minimal chunk size, no other chunk
-  // must start in its area.
-  if (chunk->word_size() > _smallest_chunk_word_size) {
-    assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
-        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
-        "No chunk must start within another chunk.");
-  }
-  if (!chunk->is_tagged_free()) {
-    assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
-        "Chunk %p is in use but marked as free in map (%d %d).",
-        chunk, chunk->get_chunk_type(), chunk->get_origin());
-  } else {
-    assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
-        "Chunk %p is free but marked as in-use in map (%d %d).",
-        chunk, chunk->get_chunk_type(), chunk->get_origin());
-  }
-}
-
-#endif // ASSERT
-
-} // namespace metaspace
-
-
--- a/src/hotspot/share/memory/metaspace/occupancyMap.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,242 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP
-#define SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-
-namespace metaspace {
-
-class Metachunk;
-
-// Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
-template <typename T> struct all_ones  { static const T value; };
-template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
-template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
-
-// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
-// keeps information about
-// - where a chunk starts
-// - whether a chunk is in-use or free
-// A bit in this bitmap represents one range of memory in the smallest
-// chunk size (SpecializedChunk or ClassSpecializedChunk).
-class OccupancyMap : public CHeapObj<mtInternal> {
-
-  // The address range this map covers.
-  const MetaWord* const _reference_address;
-  const size_t _word_size;
-
-  // The word size of a specialized chunk, aka the number of words one
-  // bit in this map represents.
-  const size_t _smallest_chunk_word_size;
-
-  // map data
-  // Data are organized in two bit layers:
-  // The first layer is the chunk-start-map. Here, a bit is set to mark
-  // the corresponding region as the head of a chunk.
-  // The second layer is the in-use-map. Here, a set bit indicates that
-  // the corresponding belongs to a chunk which is in use.
-  uint8_t* _map[2];
-
-  enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
-
-  // length, in bytes, of bitmap data
-  size_t _map_size;
-
-  // Returns true if bit at position pos at bit-layer layer is set.
-  bool get_bit_at_position(unsigned pos, unsigned layer) const {
-    assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
-    const unsigned byteoffset = pos / 8;
-    assert(byteoffset < _map_size,
-           "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
-    const unsigned mask = 1 << (pos % 8);
-    return (_map[layer][byteoffset] & mask) > 0;
-  }
-
-  // Changes bit at position pos at bit-layer layer to value v.
-  void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
-    assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
-    const unsigned byteoffset = pos / 8;
-    assert(byteoffset < _map_size,
-           "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
-    const unsigned mask = 1 << (pos % 8);
-    if (v) {
-      _map[layer][byteoffset] |= mask;
-    } else {
-      _map[layer][byteoffset] &= ~mask;
-    }
-  }
-
-  // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
-  // pos is 32/64 aligned and num_bits is 32/64.
-  // This is the typical case when coalescing to medium chunks, whose size is
-  // 32 or 64 times the specialized chunk size (depending on class or non class
-  // case), so they occupy 64 bits which should be 64bit aligned, because
-  // chunks are chunk-size aligned.
-  template <typename T>
-  bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
-    assert(_map_size > 0, "not initialized");
-    assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
-    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
-    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
-    const size_t byteoffset = pos / 8;
-    assert(byteoffset <= (_map_size - sizeof(T)),
-           "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
-    const T w = *(T*)(_map[layer] + byteoffset);
-    return w > 0 ? true : false;
-  }
-
-  // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
-  bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
-    if (pos % 32 == 0 && num_bits == 32) {
-      return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
-    } else if (pos % 64 == 0 && num_bits == 64) {
-      return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
-    } else {
-      for (unsigned n = 0; n < num_bits; n ++) {
-        if (get_bit_at_position(pos + n, layer)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
-  bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
-    assert(word_size % _smallest_chunk_word_size == 0,
-        "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
-    const unsigned pos = get_bitpos_for_address(p);
-    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
-    return is_any_bit_set_in_region(pos, num_bits, layer);
-  }
-
-  // Optimized case of set_bits_of_region for 32/64bit aligned access:
-  // pos is 32/64 aligned and num_bits is 32/64.
-  // This is the typical case when coalescing to medium chunks, whose size
-  // is 32 or 64 times the specialized chunk size (depending on class or non
-  // class case), so they occupy 64 bits which should be 64bit aligned,
-  // because chunks are chunk-size aligned.
-  template <typename T>
-  void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
-    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
-           (unsigned)(sizeof(T) * 8), pos);
-    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
-           num_bits, (unsigned)(sizeof(T) * 8));
-    const size_t byteoffset = pos / 8;
-    assert(byteoffset <= (_map_size - sizeof(T)),
-           "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
-    T* const pw = (T*)(_map[layer] + byteoffset);
-    *pw = v ? all_ones<T>::value : (T) 0;
-  }
-
-  // Set all bits in a region starting at pos to a value.
-  void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
-    assert(_map_size > 0, "not initialized");
-    assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
-    if (pos % 32 == 0 && num_bits == 32) {
-      set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
-    } else if (pos % 64 == 0 && num_bits == 64) {
-      set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
-    } else {
-      for (unsigned n = 0; n < num_bits; n ++) {
-        set_bit_at_position(pos + n, layer, v);
-      }
-    }
-  }
-
-  // Helper: sets all bits in a region [p, p+word_size).
-  void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
-    assert(word_size % _smallest_chunk_word_size == 0,
-        "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
-    const unsigned pos = get_bitpos_for_address(p);
-    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
-    set_bits_of_region(pos, num_bits, layer, v);
-  }
-
-  // Helper: given an address, return the bit position representing that address.
-  unsigned get_bitpos_for_address(const MetaWord* p) const {
-    assert(_reference_address != NULL, "not initialized");
-    assert(p >= _reference_address && p < _reference_address + _word_size,
-           "Address %p out of range for occupancy map [%p..%p).",
-            p, _reference_address, _reference_address + _word_size);
-    assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
-           "Address not aligned (%p).", p);
-    const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
-    assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
-    return (unsigned) d;
-  }
-
- public:
-
-  OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size);
-  ~OccupancyMap();
-
-  // Returns true if at address x a chunk is starting.
-  bool chunk_starts_at_address(MetaWord* p) const {
-    const unsigned pos = get_bitpos_for_address(p);
-    return get_bit_at_position(pos, layer_chunk_start_map);
-  }
-
-  void set_chunk_starts_at_address(MetaWord* p, bool v) {
-    const unsigned pos = get_bitpos_for_address(p);
-    set_bit_at_position(pos, layer_chunk_start_map, v);
-  }
-
-  // Removes all chunk-start-bits inside a region, typically as a
-  // result of a chunk merge.
-  void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
-    set_bits_of_region(p, word_size, layer_chunk_start_map, false);
-  }
-
-  // Returns true if there are life (in use) chunks in the region limited
-  // by [p, p+word_size).
-  bool is_region_in_use(MetaWord* p, size_t word_size) const {
-    return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
-  }
-
-  // Marks the region starting at p with the size word_size as in use
-  // or free, depending on v.
-  void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
-    set_bits_of_region(p, word_size, layer_in_use_map, v);
-  }
-
-  // Verify occupancy map for the address range [from, to).
-  // We need to tell it the address range, because the memory the
-  // occupancy map is covering may not be fully comitted yet.
-  DEBUG_ONLY(void verify(MetaWord* from, MetaWord* to);)
-
-  // Verify that a given chunk is correctly accounted for in the bitmap.
-  DEBUG_ONLY(void verify_for_chunk(Metachunk* chunk);)
-
-};
-
-} // namespace metaspace
-
-#endif // SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP
--- a/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +25,10 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/javaClasses.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
 #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
 #include "memory/metaspace/printMetaspaceInfoKlassClosure.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -80,7 +82,7 @@
   }
 
   // Collect statistics for this class loader metaspace
-  ClassLoaderMetaspaceStatistics this_cld_stat;
+  clms_stats_t this_cld_stat;
   msp->add_to_statistics(&this_cld_stat);
 
   // And add it to the running totals
--- a/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +29,7 @@
 #include "memory/iterator.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class outputStream;
@@ -47,13 +49,13 @@
   uintx                           _num_loaders;
   uintx                           _num_loaders_without_metaspace;
   uintx                           _num_loaders_unloading;
-  ClassLoaderMetaspaceStatistics  _stats_total;
+  clms_stats_t                    _stats_total;
 
-  uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
-  ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
+  uintx                           _num_loaders_by_spacetype [MetaspaceTypeCount];
+  clms_stats_t                    _stats_by_spacetype [MetaspaceTypeCount];
 
-  uintx                           _num_classes_by_spacetype [Metaspace::MetaspaceTypeCount];
-  uintx                           _num_classes_shared_by_spacetype [Metaspace::MetaspaceTypeCount];
+  uintx                           _num_classes_by_spacetype [MetaspaceTypeCount];
+  uintx                           _num_classes_shared_by_spacetype [MetaspaceTypeCount];
   uintx                           _num_classes;
   uintx                           _num_classes_shared;
 
--- a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP and/or its affiliates.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/rootChunkArea.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "memory/metaspace/chunkHeaderPool.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/internStat.hpp"
+#include "memory/metaspace/metachunk.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/rootChunkArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+RootChunkArea::RootChunkArea(const MetaWord* base)
+  : _base(base), _first_chunk(NULL)
+{}
+
+RootChunkArea::~RootChunkArea() {
+  // This is called when a VirtualSpaceNode is destructed (purged).
+  // All chunks should be free of course. In fact, there should only
+  // be one chunk, since all free chunks should have been merged.
+  if (_first_chunk != NULL) {
+    assert(_first_chunk->is_root_chunk() && _first_chunk->is_free(),
+           "Cannot delete root chunk area if not all chunks are free.");
+    ChunkHeaderPool::pool().return_chunk_header(_first_chunk);
+  }
+}
+
+// Initialize: allocate a root chunk header and return it. It will be only
+// partly initialized.
+// Note: this just allocates a memory-less header; the memory itself is allocated inside the VirtualSpaceNode.
+Metachunk* RootChunkArea::alloc_root_chunk_header(VirtualSpaceNode* node) {
+
+  assert(_first_chunk == NULL, "already have a root");
+
+  Metachunk* c = ChunkHeaderPool::pool().allocate_chunk_header();
+  c->initialize(node, const_cast<MetaWord*>(_base), chklvl::ROOT_CHUNK_LEVEL);
+
+  _first_chunk = c;
+
+  return c;
+
+}
+
+
+
+// Given a chunk c, split it recursively until you get a chunk of the given target_level.
+//
+// The original chunk must not be part of a freelist.
+//
+// Returns a pointer to the result chunk; the split-off chunks are added as
+//  free chunks to the freelists.
+//
+// Returns NULL if chunk cannot be split at least once.
+Metachunk* RootChunkArea::split(chklvl_t target_level, Metachunk* c, MetachunkListCluster* freelists) {
+
+  DEBUG_ONLY(c->verify(true);)
+
+  // Splitting a chunk once works like this:
+  //
+  // For a given chunk we want to split:
+  // - increase the chunk level (which halves its size)
+  // - (but leave base address as it is since it will be the leader of the newly
+  //    created chunk pair)
+  // - then create a new chunk header of the same level, set its memory range
+  //   to cover the second half of the old chunk.
+  // - wire them up (prev_in_vs/next_in_vs)
+  // - add the follower chunk as a free "splinter" chunk to the freelists.
+
+  // Doing this multiple times will create a new free splinter chunk for every
+  // level we split:
+  //
+  // A  <- original chunk
+  //
+  // B B  <- split into two halves
+  //
+  // C C B  <- first half split again
+  //
+  // D D C B  <- first half split again ...
+  //
+
+  // As an optimization, since we usually split not just once but multiple times,
+  // we do not want to handle each split separately: that would mean wiring up
+  // prev_in_vs/next_in_vs on every level just to tear the links open again on the
+  // next level when a new half-chunk splinter is introduced.
+  // Instead, we split repeatedly and delay building up the doubly linked list of
+  // the new chunks until all splits are done.
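+  //
+  // A concrete sketch of the loop below (each level increment halves the chunk
+  // size): splitting a free level-1 chunk down to target_level 4 iterates three
+  // times, adding free splinters of levels 2, 3 and 4 to the freelists, while
+  // the returned result chunk ends up at level 4 with the original base address.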
+
+  DEBUG_ONLY(check_pointer(c->base());)
+  assert(c->is_free(), "Can only split free chunks.");
+
+  DEBUG_ONLY(chklvl::check_valid_level(target_level));
+  assert(target_level > c->level(), "Wrong target level");
+
+  DEBUG_ONLY(verify(true);)
+
+  const chklvl_t starting_level = c->level();
+
+  Metachunk* result = c;
+
+  log_trace(metaspace)("Splitting chunk @" PTR_FORMAT ", base " PTR_FORMAT ", level " CHKLVL_FORMAT "...",
+                       p2i(c), p2i(c->base()), c->level());
+
+  while (result->level() < target_level) {
+
+    result->inc_level();
+    Metachunk* splinter_chunk = ChunkHeaderPool::pool().allocate_chunk_header();
+    splinter_chunk->initialize(result->vsnode(), result->end(), result->level());
+
+    // Fix committed words info: If over the half of the original chunk was
+    // committed, committed area spills over into the follower chunk.
+    const size_t old_committed_words = result->committed_words();
+    if (old_committed_words > result->word_size()) {
+      result->set_committed_words(result->word_size());
+      splinter_chunk->set_committed_words(old_committed_words - result->word_size());
+    } else {
+      splinter_chunk->set_committed_words(0);
+    }
+
+    // Insert splinter chunk into vs list
+    if (result->next_in_vs() != NULL) {
+      result->next_in_vs()->set_prev_in_vs(splinter_chunk);
+    }
+    splinter_chunk->set_next_in_vs(result->next_in_vs());
+    splinter_chunk->set_prev_in_vs(result);
+    result->set_next_in_vs(splinter_chunk);
+
+    log_trace(metaspace)("Created splinter chunk @" PTR_FORMAT ", base " PTR_FORMAT ", level " CHKLVL_FORMAT "...",
+                         p2i(splinter_chunk), p2i(splinter_chunk->base()), splinter_chunk->level());
+
+    // Add splinter to free lists
+    freelists->add(splinter_chunk);
+
+    DEBUG_ONLY(InternalStats::inc_num_chunks_added_to_freelist_due_to_split();)
+
+  }
+
+  assert(result->level() == target_level, "Sanity");
+
+  DEBUG_ONLY(verify(true);)
+  DEBUG_ONLY(result->verify(true);)
+
+  DEBUG_ONLY(InternalStats::inc_num_chunk_splits();)
+
+  return result;
+
+}
+
+
+// Given a chunk, attempt to merge it recursively with its neighboring chunks.
+//
+// If successful (merged at least once), returns address of
+// the merged chunk; NULL otherwise.
+//
+// The merged chunks are removed from the freelists.
+//
+// !!! Please note that if this method returns a non-NULL value, the
+// original chunk will be invalid and should not be accessed anymore! !!!
+Metachunk* RootChunkArea::merge(Metachunk* c, MetachunkListCluster* freelists) {
+
+  // Note the following rules:
+  //
+  // - a chunk always has a buddy, unless it is a root chunk.
+  // - in that buddy pair, a chunk is either the leader or the follower.
+  // - a chunk's base address is always aligned to its size.
+  // - if a chunk is the leader, its base address is also aligned to the size of
+  //   the next lower level, at least. A follower chunk's is not.
+
+  // How we merge once:
+  //
+  // For a given chunk c, which has to be free and non-root, we do:
+  // - find out if we are the leader or the follower chunk
+  // - if we are leader, next_in_vs must be the follower; if we are follower,
+  //   prev_in_vs must be the leader. Now we have the buddy chunk.
+  // - However, if the buddy chunk itself is split (of a level higher than us)
+  //   we cannot merge.
+  // - we can only merge if the buddy is of the same level as we are and it is
+  //   free.
+  // - Then we merge by simply removing the follower chunk from the address range
+  //   linked list (returning the now useless header to the pool) and decreasing
+  //   the leader chunk level by one. That makes it double the size.
+
+  // Example:
+  // (lower case chunks are free, the * indicates the chunk we want to merge):
+  //
+  // ........................
+  // d d*c   b       A           <- we start with the second (d*) chunk...
+  //
+  // c*  c   b       A           <- we merge it with its predecessor and decrease its level...
+  //
+  // b*      b       A           <- we merge it again, since its new neighbor was free too...
+  //
+  // a*              A           <- we merge it again, since its new neighbor was free too...
+  //
+  // And we are done, since its new neighbor, (A), is not free. We would also be done
+  // if the new neighbor itself is splintered.
+
+  DEBUG_ONLY(check_pointer(c->base());)
+  assert(!c->is_root_chunk(), "Cannot be merged further.");
+  assert(c->is_free(), "Can only merge free chunks.");
+
+  DEBUG_ONLY(c->verify(false);)
+
+  log_trace(metaspace)("Attempting to merge chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
+
+  const chklvl_t starting_level = c->level();
+
+  bool stop = false;
+  Metachunk* result = NULL;
+
+  do {
+
+    // First find out if this chunk is the leader of its pair
+    const bool is_leader = c->is_leader();
+
+    // Note: this is either our buddy or a splinter of the buddy.
+    Metachunk* const buddy = c->is_leader() ? c->next_in_vs() : c->prev_in_vs();
+    DEBUG_ONLY(buddy->verify(true);)
+
+    // A buddy chunk must be of the same or a higher level (so, the same size
+    // or smaller); it can never be larger.
+    assert(buddy->level() >= c->level(), "Sanity");
+
+    // Is this really my buddy (same level) or a splinter of it (higher level)?
+    // Also, is it free?
+    if (buddy->level() != c->level() || !buddy->is_free()) {
+
+      log_trace(metaspace)("cannot merge with chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(buddy));
+
+      stop = true;
+
+    } else {
+
+      log_trace(metaspace)("will merge with chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(buddy));
+
+      // We can merge with the buddy.
+
+      // First, remove buddy from the chunk manager.
+      assert(buddy->is_free(), "Sanity");
+      freelists->remove(buddy);
+      DEBUG_ONLY(InternalStats::inc_num_chunks_removed_from_freelist_due_to_merge();)
+
+      // Determine current leader and follower
+      Metachunk* leader;
+      Metachunk* follower;
+      if (is_leader) {
+        leader = c; follower = buddy;
+      } else {
+        leader = buddy; follower = c;
+      }
+
+      // Last checkpoint
+      assert(leader->end() == follower->base() &&
+             leader->level() == follower->level() &&
+             leader->is_free() && follower->is_free(), "Sanity");
+
+      // The new merged chunk is committed as far as possible (if the leader
+      // chunk is fully committed, the commit carries over into the follower chunk).
+      size_t merged_committed_words = leader->committed_words();
+      if (merged_committed_words == leader->word_size()) {
+        merged_committed_words += follower->committed_words();
+      }
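+      // (committed_words tracks the contiguous committed range from the chunk
+      //  base; if the leader is only partly committed, the commit range of the
+      //  merged chunk ends inside the leader part, so the follower's committed
+      //  words do not count toward it.)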
+
+      // Leader survives, follower chunk is freed. Remove follower from vslist ..
+      leader->set_next_in_vs(follower->next_in_vs());
+      if (follower->next_in_vs() != NULL) {
+        follower->next_in_vs()->set_prev_in_vs(leader);
+      }
+
+      // .. and return follower chunk header to pool for reuse.
+      ChunkHeaderPool::pool().return_chunk_header(follower);
+
+      // Leader level gets decreased (leader chunk doubles in size) but
+      // base address stays the same.
+      leader->dec_level();
+
+      // set commit boundary
+      leader->set_committed_words(merged_committed_words);
+
+      // If the leader is now of root chunk size, stop merging
+      if (leader->is_root_chunk()) {
+        stop = true;
+      }
+
+      result = c = leader;
+
+      DEBUG_ONLY(leader->verify(true);)
+
+    }
+
+  } while (!stop);
+
+#ifdef ASSERT
+  verify(true);
+  if (result != NULL) {
+    result->verify(true);
+    if (result->level() < starting_level) {
+      DEBUG_ONLY(InternalStats::inc_num_chunk_merges();)
+    }
+  }
+#endif // ASSERT
+
+  return result;
+
+}
+
+// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+// enlarge it in place by claiming its trailing buddy.
+//
+// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+//
+// If successful, the follower chunk will be removed from the freelists, the leader chunk c will
+// double in size (level decreased by one).
+//
+// On success, true is returned, false otherwise.
+bool RootChunkArea::attempt_enlarge_chunk(Metachunk* c, MetachunkListCluster* freelists) {
+
+  DEBUG_ONLY(check_pointer(c->base());)
+  assert(!c->is_root_chunk(), "Cannot be merged further.");
+
+  // There is no real reason for this limitation other than that it is not
+  // needed for free chunks, since those should already have been merged:
+  assert(c->is_in_use(), "Can only enlarge in use chunks.");
+
+  DEBUG_ONLY(c->verify(false);)
+
+  if (!c->is_leader()) {
+    return false;
+  }
+
+  // We are the leader, so the buddy must follow us.
+  Metachunk* const buddy = c->next_in_vs();
+  DEBUG_ONLY(buddy->verify(true);)
+
+  // Of course buddy cannot be larger than us.
+  assert(buddy->level() >= c->level(), "Sanity");
+
+  // We cannot merge buddy in if it is not free...
+  if (!buddy->is_free()) {
+    return false;
+  }
+
+  // ... nor if it is splintered.
+  if (buddy->level() != c->level()) {
+    return false;
+  }
+
+  // Okay, let's enlarge c.
+
+  log_trace(metaspace)("Enlarging chunk " METACHUNK_FULL_FORMAT " by merging in follower " METACHUNK_FULL_FORMAT ".",
+                       METACHUNK_FULL_FORMAT_ARGS(c), METACHUNK_FULL_FORMAT_ARGS(buddy));
+
+  // The enlarged c is committed as far as possible:
+  size_t merged_committed_words = c->committed_words();
+  if (merged_committed_words == c->word_size()) {
+    merged_committed_words += buddy->committed_words();
+  }
+
+  // Remove buddy from vs list...
+  Metachunk* successor = buddy->next_in_vs();
+  if (successor != NULL) {
+    successor->set_prev_in_vs(c);
+  }
+  c->set_next_in_vs(successor);
+
+  // .. and from freelist ...
+  freelists->remove(buddy);
+
+  // .. and return its empty husk to the pool...
+  ChunkHeaderPool::pool().return_chunk_header(buddy);
+
+  // Then decrease level of c.
+  c->dec_level();
+
+  // and correct committed words if needed.
+  c->set_committed_words(merged_committed_words);
+
+  log_debug(metaspace)("Enlarged chunk " METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
+
+  DEBUG_ONLY(verify(true));
+
+  return true;
+
+}
+
+
+#ifdef ASSERT
+
+#define assrt_(cond, ...) \
+  if (!(cond)) { \
+    fdStream errst(2); \
+    this->print_on(&errst); \
+    vmassert(cond, __VA_ARGS__); \
+  }
+
+void RootChunkArea::verify(bool slow) const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  assert_is_aligned(_base, chklvl::MAX_CHUNK_BYTE_SIZE);
+
+  // Iterate through all chunks in this area. They must be ordered correctly
+  // (adjacent to each other) and must cover the complete area.
+  int num_chunk = 0;
+
+  if (_first_chunk != NULL) {
+
+    assrt_(_first_chunk->prev_in_vs() == NULL, "Sanity");
+
+    const Metachunk* c = _first_chunk;
+    const MetaWord* expected_next_base = _base;
+    const MetaWord* const area_end = _base + word_size();
+
+    while (c != NULL) {
+
+      assrt_(c->base() == expected_next_base,
+             "Chunk No. %d " METACHUNK_FORMAT " - unexpected base.",
+             num_chunk, METACHUNK_FORMAT_ARGS(c));
+
+      assrt_(c->base() >= base() && c->end() <= end(),
+             "chunk %d " METACHUNK_FORMAT " oob for this root area [" PTR_FORMAT ".." PTR_FORMAT ").",
+             num_chunk, METACHUNK_FORMAT_ARGS(c), p2i(base()), p2i(end()));
+
+      assrt_(is_aligned(c->base(), c->word_size()),
+             "misaligned chunk %d " METACHUNK_FORMAT ".", num_chunk, METACHUNK_FORMAT_ARGS(c));
+
+      c->verify_neighborhood();
+      c->verify(slow);
+
+      expected_next_base = c->end();
+      num_chunk ++;
+
+      c = c->next_in_vs();
+
+    }
+    assrt_(expected_next_base == _base + word_size(), "Sanity");
+  }
+
+}
+
+void RootChunkArea::verify_area_is_ideally_merged() const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  int num_chunk = 0;
+  for (const Metachunk* c = _first_chunk; c != NULL; c = c->next_in_vs()) {
+    if (!c->is_root_chunk() && c->is_free()) {
+      // If a chunk is free, it must not have a buddy which is also free, because
+      // those chunks should have been merged.
+      // In other words, the buddy shall be either in use or splintered
+      // (which in turn means parts of it are in use).
+      Metachunk* const buddy = c->is_leader() ? c->next_in_vs() : c->prev_in_vs();
+      assrt_(buddy->is_in_use() || buddy->level() > c->level(),
+             "Chunk No. %d " METACHUNK_FORMAT " : missed merge opportunity with neighbor " METACHUNK_FORMAT ".",
+             num_chunk, METACHUNK_FORMAT_ARGS(c), METACHUNK_FORMAT_ARGS(buddy));
+    }
+    num_chunk ++;
+  }
+}
+
+#endif
+
+void RootChunkArea::print_on(outputStream* st) const {
+
+  st->print(PTR_FORMAT ": ", p2i(base()));
+  if (_first_chunk != NULL) {
+    const Metachunk* c = _first_chunk;
+    const char* letters_for_levels_cap = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+    const char* letters_for_levels =     "abcdefghijklmnopqrstuvwxyz";
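+    // One letter is printed per chunk; the letter encodes the chunk level
+    // ('a'/'A' = root level), lowercase for free chunks, uppercase for chunks
+    // in use. For example, "cCB" would be a free level-2 chunk, an in-use
+    // level-2 chunk, then an in-use level-1 chunk.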
+    while (c != NULL) {
+      const chklvl_t l = c->level();
+      if (l >= 0 && (size_t)l < strlen(letters_for_levels)) {
+        st->print("%c", c->is_free() ? letters_for_levels[c->level()] : letters_for_levels_cap[c->level()]);
+      } else {
+        // Obviously garbage, but let's not crash.
+        st->print("?");
+      }
+      c = c->next_in_vs();
+    }
+  } else {
+    st->print(" (no chunks)");
+  }
+  st->cr();
+
+}
+
+
+// Create an array of RootChunkArea objects covering a given memory range.
+// The memory range must be a multiple of the root chunk size.
+RootChunkAreaLUT::RootChunkAreaLUT(const MetaWord* base, size_t word_size)
+  : _base(base),
+    _num((int)(word_size / chklvl::MAX_CHUNK_WORD_SIZE)),
+    _arr(NULL)
+{
+  assert_is_aligned(word_size, chklvl::MAX_CHUNK_WORD_SIZE);
+  _arr = NEW_C_HEAP_ARRAY(RootChunkArea, _num, mtClass);
+  const MetaWord* this_base = _base;
+  for (int i = 0; i < _num; i ++) {
+    RootChunkArea* rca = new(_arr + i) RootChunkArea(this_base);
+    assert(rca == _arr + i, "Sanity");
+    this_base += chklvl::MAX_CHUNK_WORD_SIZE;
+  }
+}
+
+RootChunkAreaLUT::~RootChunkAreaLUT() {
+  for (int i = 0; i < _num; i ++) {
+    _arr[i].~RootChunkArea();
+  }
+  FREE_C_HEAP_ARRAY(RootChunkArea, _arr);
+}
+
+#ifdef ASSERT
+
+void RootChunkAreaLUT::verify(bool slow) const {
+  for (int i = 0; i < _num; i ++) {
+    check_pointer(_arr[i].base());
+    _arr[i].verify(slow);
+  }
+}
+
+#endif
+
+void RootChunkAreaLUT::print_on(outputStream* st) const {
+  for (int i = 0; i < _num; i ++) {
+    st->print("%2d:", i);
+    _arr[i].print_on(st);
+  }
+}
+
+
+} // end: namespace metaspace
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/rootChunkArea.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_ROOTCHUNKAREA_HPP
+#define SHARE_MEMORY_METASPACE_ROOTCHUNKAREA_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class outputStream;
+
+namespace metaspace {
+
+class Metachunk;
+class MetachunkClosure;
+class MetachunkListCluster;
+class VirtualSpaceNode;
+
+
+// RootChunkArea describes the chunk composition of one root-chunk-sized area.
+//
+
+class RootChunkArea {
+
+  // The base address of this area.
+  const MetaWord* const _base;
+
+  // The first chunk in this area; if this area is maximally
+  // folded, this is the root chunk covering the whole area size.
+  Metachunk* _first_chunk;
+
+public:
+
+  RootChunkArea(const MetaWord* base);
+  ~RootChunkArea();
+
+  // Initialize: allocate a root chunk header and return it. It will be only
+  // partly initialized.
+  // Note: this just allocates a memory-less header; the memory itself is allocated inside the VirtualSpaceNode.
+  Metachunk* alloc_root_chunk_header(VirtualSpaceNode* node);
+
+  // Given a chunk c, split it recursively until you get a chunk of the given target_level.
+  //
+  // The original chunk must not be part of a freelist.
+  //
+  // Returns a pointer to the result chunk; the split-off chunks are added as
+  //  free chunks to the freelists.
+  //
+  // Returns NULL if chunk cannot be split at least once.
+  Metachunk* split(chklvl_t target_level, Metachunk* c, MetachunkListCluster* freelists);
+
+  // Given a chunk, attempt to merge it recursively with its neighboring chunks.
+  //
+  // If successful (merged at least once), returns address of
+  // the merged chunk; NULL otherwise.
+  //
+  // The merged chunks are removed from the freelists.
+  //
+  // !!! Please note that if this method returns a non-NULL value, the
+  // original chunk will be invalid and should not be accessed anymore! !!!
+  Metachunk* merge(Metachunk* c, MetachunkListCluster* freelists);
+
+  // Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+  // enlarge it in place by claiming its trailing buddy.
+  //
+  // This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+  //
+  // If successful, the follower chunk will be removed from the freelists, the leader chunk c will
+  // double in size (level decreased by one).
+  //
+  // On success, true is returned, false otherwise.
+  bool attempt_enlarge_chunk(Metachunk* c, MetachunkListCluster* freelists);
+
+  // Returns true if all chunks in this area are free; false if not.
+  bool all_chunks_are_free() const;
+
+  /// range ///
+
+  const MetaWord* base() const  { return _base; }
+  size_t word_size() const      { return chklvl::MAX_CHUNK_WORD_SIZE; }
+  const MetaWord* end() const   { return _base + word_size(); }
+
+  // Direct access to the first chunk (use with care)
+  Metachunk* first_chunk()              { return _first_chunk; }
+  const Metachunk* first_chunk() const  { return _first_chunk; }
+
+  //// Debug stuff ////
+
+#ifdef ASSERT
+  void check_pointer(const MetaWord* p) const {
+    assert(p >= _base && p < _base + word_size(),
+           "pointer " PTR_FORMAT " oob for this root area [" PTR_FORMAT ".." PTR_FORMAT ")",
+           p2i(p), p2i(_base), p2i(_base + word_size()));
+  }
+  void verify(bool slow) const;
+
+  // This is a separate operation from verify(). We should be able to call verify()
+  // from almost anywhere, regardless of state, but verify_area_is_ideally_merged()
+  // can only be called outside split and merge ops.
+  void verify_area_is_ideally_merged() const;
+#endif // ASSERT
+
+  void print_on(outputStream* st) const;
+
+};
+
+
+///////////////////////
+// A lookup table for RootChunkAreas: given an address into a VirtualSpaceNode,
+// it gives the RootChunkArea containing this address.
+// To reduce pointer chasing, the LUT entries (of type RootChunkArea) are kept
+// in a single contiguous array.
+class RootChunkAreaLUT {
+
+  // Base address of the whole area.
+  const MetaWord* const _base;
+
+  // Number of root chunk areas.
+  const int _num;
+
+  // Array of RootChunkArea objects.
+  RootChunkArea* _arr;
+
+#ifdef ASSERT
+  void check_pointer(const MetaWord* p) const {
+    assert(p >= base() && p < base() + word_size(), "Invalid pointer");
+  }
+#endif
+
+  // Given an address into this range, return the index into the area array for the
+  // area this address falls into.
+  int index_by_address(const MetaWord* p) const {
+    DEBUG_ONLY(check_pointer(p);)
+    int idx = (int)((p - base()) / chklvl::MAX_CHUNK_WORD_SIZE);
+    assert(idx >= 0 && idx < _num, "Sanity");
+    return idx;
+  }
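+  // (Example: assuming, for illustration, a root chunk size
+  //  (chklvl::MAX_CHUNK_WORD_SIZE) of 4 MB, an address 9 MB beyond _base
+  //  yields index 2.)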
+
+public:
+
+  RootChunkAreaLUT(const MetaWord* base, size_t word_size);
+  ~RootChunkAreaLUT();
+
+  // Given a memory address into the range this array covers, return the
+  // corresponding area object. The areas are created eagerly in the
+  // constructor, so one always exists.
+  RootChunkArea* get_area_by_address(const MetaWord* p) const {
+    const int idx = index_by_address(p);
+    RootChunkArea* ra = _arr + idx;
+    DEBUG_ONLY(ra->check_pointer(p);)
+    return ra;
+  }
+
+  // Access area by its index
+  int number_of_areas() const                               { return _num; }
+  RootChunkArea* get_area_by_index(int index)               { assert(index >= 0 && index < _num, "oob"); return _arr + index; }
+  const RootChunkArea* get_area_by_index(int index) const   { assert(index >= 0 && index < _num, "oob"); return _arr + index; }
+
+  /// range ///
+
+  const MetaWord* base() const  { return _base; }
+  size_t word_size() const      { return _num * chklvl::MAX_CHUNK_WORD_SIZE; }
+  const MetaWord* end() const   { return _base + word_size(); }
+
+  DEBUG_ONLY(void verify(bool slow) const;)
+
+  void print_on(outputStream* st) const;
+
+};
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_ROOTCHUNKAREA_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/runningCounters.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/runningCounters.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+
+namespace metaspace {
+
+SizeAtomicCounter RunningCounters::_used_class_counter("used-words-in-class-space");
+SizeAtomicCounter RunningCounters::_used_nonclass_counter("used-words-in-nonclass-space");
+
+// Return reserved size, in words, for Metaspace
+size_t RunningCounters::reserved_words() {
+  return reserved_words_class() + reserved_words_nonclass();
+}
+
+size_t RunningCounters::reserved_words_class() {
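+  // The class-space list may not exist (e.g. if no compressed class space is in use).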
+  VirtualSpaceList* vs = VirtualSpaceList::vslist_class();
+  return vs != NULL ? vs->reserved_words() : 0;
+}
+
+size_t RunningCounters::reserved_words_nonclass() {
+  return VirtualSpaceList::vslist_nonclass()->reserved_words();
+}
+
+// Return total committed size, in words, for Metaspace
+size_t RunningCounters::committed_words() {
+  return committed_words_class() + committed_words_nonclass();
+}
+
+size_t RunningCounters::committed_words_class() {
+  VirtualSpaceList* vs = VirtualSpaceList::vslist_class();
+  return vs != NULL ? vs->committed_words() : 0;
+}
+
+size_t RunningCounters::committed_words_nonclass() {
+  return VirtualSpaceList::vslist_nonclass()->committed_words();
+}
+
+
+// ---- used chunks -----
+
+// Returns size, in words, used for metadata.
+size_t RunningCounters::used_words() {
+  return used_words_class() + used_words_nonclass();
+}
+
+size_t RunningCounters::used_words_class() {
+  return _used_class_counter.get();
+}
+
+size_t RunningCounters::used_words_nonclass() {
+  return _used_nonclass_counter.get();
+}
+
+// ---- free chunks -----
+
+// Returns size, in words, of all chunks in all freelists.
+size_t RunningCounters::free_chunks_words() {
+  return free_chunks_words_class() + free_chunks_words_nonclass();
+}
+
+size_t RunningCounters::free_chunks_words_class() {
+  ChunkManager* cm = ChunkManager::chunkmanager_class();
+  return cm != NULL ? cm->total_word_size() : 0;
+}
+
+size_t RunningCounters::free_chunks_words_nonclass() {
+  return ChunkManager::chunkmanager_nonclass()->total_word_size();
+}
+
+} // namespace metaspace
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/runningCounters.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP
+#define SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+
+namespace metaspace {
+
+class ClassLoaderMetaspace;
+
+// These are running counters for some basic Metaspace statistics.
+// Their value can be obtained quickly without locking.
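+//
+// A hypothetical usage sketch (BytesPerWord converts word counts to byte sizes):
+//
+//   const size_t committed_bytes = RunningCounters::committed_words() * BytesPerWord;
+//   const size_t free_bytes      = RunningCounters::free_chunks_words() * BytesPerWord;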
+
+class RunningCounters : public AllStatic {
+
+  friend class ClassLoaderMetaspace;
+
+  // ---- in-use chunks ----
+
+  // Used space, in words.
+  // (Note that the used counter is on the hot path of Metaspace allocation.
+  //  Do we really need it? We may get by with capacity only and get more details
+  //  with get_statistics_slow().)
+  static SizeAtomicCounter _used_class_counter;
+  static SizeAtomicCounter _used_nonclass_counter;
+
+public:
+
+  // ---- virtual memory -----
+
+  // Return reserved size, in words, for Metaspace
+  static size_t reserved_words();
+  static size_t reserved_words_class();
+  static size_t reserved_words_nonclass();
+
+  // Return total committed size, in words, for Metaspace
+  static size_t committed_words();
+  static size_t committed_words_class();
+  static size_t committed_words_nonclass();
+
+
+  // ---- used chunks -----
+
+  // Returns size, in words, used for metadata.
+  static size_t used_words();
+  static size_t used_words_class();
+  static size_t used_words_nonclass();
+
+  // ---- free chunks -----
+
+  // Returns size, in words, of all chunks in all freelists.
+  static size_t free_chunks_words();
+  static size_t free_chunks_words_class();
+  static size_t free_chunks_words_nonclass();
+
+  // Direct access to the counters.
+  static SizeAtomicCounter* used_nonclass_counter()     { return &_used_nonclass_counter; }
+  static SizeAtomicCounter* used_class_counter()        { return &_used_class_counter; }
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/settings.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#include "precompiled.hpp"
+
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/settings.hpp"
+
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+size_t Settings::_commit_granule_bytes = 0;
+size_t Settings::_commit_granule_words = 0;
+bool Settings::_newborn_root_chunks_are_fully_committed = false;
+size_t Settings::_committed_words_on_fresh_chunks = 0;
+
+bool Settings::_enlarge_chunks_in_place = false;
+size_t Settings::_enlarge_chunks_in_place_max_word_size = 0;
+
+bool Settings::_uncommit_on_return = false;
+size_t Settings::_uncommit_on_return_min_word_size = 0;
+
+bool Settings::_delete_nodes_on_purge = false;
+bool Settings::_uncommit_on_purge = false;
+size_t Settings::_uncommit_on_purge_min_word_size = 0;
+
+
+
+void Settings::ergo_initialize() {
+
+  if (strcmp(MetaspaceReclaimStrategy, "none") == 0) {
+
+    log_info(metaspace)("Initialized with strategy: no reclaim.");
+
+    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
+    _commit_granule_words = _commit_granule_bytes / BytesPerWord;
+
+    _newborn_root_chunks_are_fully_committed = true;
+
+    _committed_words_on_fresh_chunks = chklvl::MAX_CHUNK_WORD_SIZE;
+
+    _uncommit_on_return = false;
+    _uncommit_on_return_min_word_size = 3; // Value does not matter; it should not be used and asserts if it is.
+
+    _delete_nodes_on_purge = false;
+    _uncommit_on_purge = false;
+    _uncommit_on_purge_min_word_size = 3; // Value does not matter; it should not be used and asserts if it is.
+
+  } else if (strcmp(MetaspaceReclaimStrategy, "aggressive") == 0) {
+
+    log_info(metaspace)("Initialized with strategy: aggressive reclaim.");
+
+    // Set the granule size rather small; this may increase
+    // mapping fragmentation but also increases the chance to uncommit.
+    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 16 * K);
+    _commit_granule_words = _commit_granule_bytes / BytesPerWord;
+
+    _newborn_root_chunks_are_fully_committed = false;
+
+    // When handing out fresh chunks, only commit the minimum sensible amount (0 would be possible
+    // but would not make sense, since the chunk is immediately used for allocation after being handed
+    // out, so the first granule would be committed right away anyway).
+    _committed_words_on_fresh_chunks = _commit_granule_words;
+
+    _uncommit_on_return = true;
+    _uncommit_on_return_min_word_size = _commit_granule_words;
+
+    _delete_nodes_on_purge = true;
+    _uncommit_on_purge = true;
+    _uncommit_on_purge_min_word_size = _commit_granule_words;
+
+  } else if (strcmp(MetaspaceReclaimStrategy, "balanced") == 0) {
+
+    log_info(metaspace)("Initialized with strategy: balanced reclaim.");
+
+    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
+    _commit_granule_words = _commit_granule_bytes / BytesPerWord;
+
+    _newborn_root_chunks_are_fully_committed = false;
+
+    // When handing out fresh chunks, only commit the minimum sensible amount (0 would be possible
+    // but would not make sense, since the chunk is immediately used for allocation after being handed
+    // out, so the first granule would be committed right away anyway).
+    _committed_words_on_fresh_chunks = _commit_granule_words;
+
+    _uncommit_on_return = true;
+    _uncommit_on_return_min_word_size = _commit_granule_words;
+
+    _delete_nodes_on_purge = true;
+    _uncommit_on_purge = true;
+    _uncommit_on_purge_min_word_size = _commit_granule_words;
+
+  } else {
+
+    vm_exit_during_initialization("Invalid value for MetaspaceReclaimStrategy: \"%s\".", MetaspaceReclaimStrategy);
+
+  }
+
+  // Since this has nothing to do with reclaiming, set it independently of the
+  // strategy. The limit is chosen rather arbitrarily.
+  _enlarge_chunks_in_place = MetaspaceEnlargeChunksInPlace;
+  _enlarge_chunks_in_place_max_word_size = 256 * K;
+
+  // Sanity checks.
+  guarantee(commit_granule_words() <= chklvl::MAX_CHUNK_WORD_SIZE, "Too large granule size");
+  guarantee(is_power_of_2(commit_granule_words()), "granule size must be a power of 2");
+
+  LogStream ls(Log(metaspace)::info());
+  Settings::print_on(&ls);
+
+}
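+
+// A worked example for the granule arithmetic above, assuming a 64-bit VM
+// (BytesPerWord == 8) and a 4 K page size: the "none" and "balanced" strategies
+// yield _commit_granule_bytes == MAX2(4 * K, 64 * K) == 64 K, hence
+// _commit_granule_words == 64 K / 8 == 8192 words; "aggressive" yields 16 K,
+// hence 2048 words.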
+
+void Settings::print_on(outputStream* st) {
+
+  st->print_cr(" - commit_granule_bytes: " SIZE_FORMAT ".", commit_granule_bytes());
+  st->print_cr(" - commit_granule_words: " SIZE_FORMAT ".", commit_granule_words());
+
+  st->print_cr(" - newborn_root_chunks_are_fully_committed: %d.", (int)newborn_root_chunks_are_fully_committed());
+  st->print_cr(" - committed_words_on_fresh_chunks: " SIZE_FORMAT ".", committed_words_on_fresh_chunks());
+
+  st->print_cr(" - virtual_space_node_default_size: " SIZE_FORMAT ".", virtual_space_node_default_word_size());
+  st->print_cr(" - allocation_from_dictionary_limit: " SIZE_FORMAT ".", allocation_from_dictionary_limit());
+
+  st->print_cr(" - enlarge_chunks_in_place: %d.", (int)enlarge_chunks_in_place());
+  st->print_cr(" - enlarge_chunks_in_place_max_word_size: " SIZE_FORMAT ".", enlarge_chunks_in_place_max_word_size());
+
+  st->print_cr(" - uncommit_on_return: %d.", (int)uncommit_on_return());
+  st->print_cr(" - uncommit_on_return_min_word_size: " SIZE_FORMAT ".", uncommit_on_return_min_word_size());
+
+  st->print_cr(" - delete_nodes_on_purge: %d.", (int)delete_nodes_on_purge());
+
+  st->print_cr(" - uncommit_on_purge: %d.", (int)uncommit_on_purge());
+  st->print_cr(" - uncommit_on_purge_min_word_size: " SIZE_FORMAT ".", uncommit_on_purge_min_word_size());
+
+
+}
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/settings.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_SETTINGS_HPP
+#define SHARE_MEMORY_METASPACE_SETTINGS_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+class Settings : public AllStatic {
+
+  // Granularity, in bytes, metaspace is committed with.
+  static size_t _commit_granule_bytes;
+
+  // Granularity, in words, metaspace is committed with.
+  static size_t _commit_granule_words;
+
+  // Whether or not to fully commit new-born root chunks right after creation.
+  static bool _newborn_root_chunks_are_fully_committed;
+
+  // When a chunk is handed out by the ChunkManager to a class loader, how much
+  // of a chunk should be committed up-front?
+  // Size in words. Will be rounded up to the nearest multiple of commit_granule_words.
+  // (Note: 0 is possible but inefficient, since it will cause the ClassLoaderMetaspace
+  //        to commit the first granule right away anyway, so nothing is saved.
+  //        chklvl::MAX_CHUNK_WORD_SIZE pretty much means every chunk is committed through
+  //        from the start.)
+  static size_t _committed_words_on_fresh_chunks;
+
+  // The default size of a non-class VirtualSpaceNode (unless created differently).
+  // Must be a multiple of the root chunk size.
+  static const size_t _virtual_space_node_default_word_size = chklvl::MAX_CHUNK_WORD_SIZE * 2; // Let's go with 8 MB of virtual size; this seems a good compromise between virtual memory usage and mapping fragmentation.
+
+  static const size_t _allocation_from_dictionary_limit = 4 * K;
+
+  // When allocating from a chunk, if the remaining area in the chunk is too small to hold
+  // the requested size, we attempt to double the chunk size in place...
+  static bool   _enlarge_chunks_in_place;
+
+  // .. but we only do this for chunks below a given size to prevent unnecessary memory blowup.
+  static size_t _enlarge_chunks_in_place_max_word_size;
+
+  // If true, chunks are uncommitted when they are returned to the freelist.
+  static bool _uncommit_on_return;
+
+  // If true, vsnodes which only contain free chunks will be deleted (purged) as part of a gc.
+  static bool _delete_nodes_on_purge;
+
+  // If _uncommit_on_return is true:
+  // Minimum word size a chunk has to have, after being returned and merged with neighboring free chunks,
+  // to be a candidate for uncommitting. Must be a multiple of, and not smaller than, the commit granularity.
+  static size_t _uncommit_on_return_min_word_size;
+
+  // If true, chunks are uncommitted after gc (when metaspace is purged).
+  static bool _uncommit_on_purge;
+
+  // If _uncommit_on_purge is true:
+  // Minimum word size of an area to be a candidate for uncommitting.
+  // Must be a multiple of, and not smaller than, the commit granularity.
+  static size_t _uncommit_on_purge_min_word_size;
+
+public:
+
+  static size_t commit_granule_bytes()                        { return _commit_granule_bytes; }
+  static size_t commit_granule_words()                        { return _commit_granule_words; }
+  static bool newborn_root_chunks_are_fully_committed()       { return _newborn_root_chunks_are_fully_committed; }
+  static size_t committed_words_on_fresh_chunks()             { return _committed_words_on_fresh_chunks; }
+  static size_t virtual_space_node_default_word_size()        { return _virtual_space_node_default_word_size; }
+  static size_t allocation_from_dictionary_limit()            { return _allocation_from_dictionary_limit; }
+  static bool enlarge_chunks_in_place()                       { return _enlarge_chunks_in_place; }
+  static size_t enlarge_chunks_in_place_max_word_size()       { return _enlarge_chunks_in_place_max_word_size; }
+  static bool uncommit_on_return()                            { return _uncommit_on_return; }
+  static size_t uncommit_on_return_min_word_size()            { return _uncommit_on_return_min_word_size; }
+  static bool delete_nodes_on_purge()                         { return _delete_nodes_on_purge; }
+  static bool uncommit_on_purge()                             { return _uncommit_on_purge; }
+  static size_t uncommit_on_purge_min_word_size()             { return _uncommit_on_purge_min_word_size; }
+
+  static void ergo_initialize();
+
+  static void print_on(outputStream* st);
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_SETTINGS_HPP
--- a/src/hotspot/share/memory/metaspace/smallBlocks.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/smallBlocks.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -31,8 +31,8 @@
 
 void SmallBlocks::print_on(outputStream* st) const {
   st->print_cr("SmallBlocks:");
-  for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
-    uint k = i - _small_block_min_size;
+  for (uint i = _small_block_min_word_size; i < _small_block_max_word_size; i++) {
+    uint k = i - _small_block_min_word_size;
     st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
   }
 }
@@ -41,8 +41,8 @@
 // Returns the total size, in words, of all blocks, across all block sizes.
 size_t SmallBlocks::total_size() const {
   size_t result = 0;
-  for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
-    uint k = i - _small_block_min_size;
+  for (uint i = _small_block_min_word_size; i < _small_block_max_word_size; i++) {
+    uint k = i - _small_block_min_word_size;
     result = result + _small_lists[k].count() * _small_lists[k].size();
   }
   return result;
@@ -51,8 +51,8 @@
 // Returns the total number of all blocks across all block sizes.
 uintx SmallBlocks::total_num_blocks() const {
   uintx result = 0;
-  for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
-    uint k = i - _small_block_min_size;
+  for (uint i = _small_block_min_word_size; i < _small_block_max_word_size; i++) {
+    uint k = i - _small_block_min_word_size;
     result = result + _small_lists[k].count();
   }
   return result;
--- a/src/hotspot/share/memory/metaspace/smallBlocks.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/smallBlocks.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/metaspace/metablock.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class outputStream;
@@ -36,22 +37,27 @@
 
 class SmallBlocks : public CHeapObj<mtClass> {
 
-  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
+  const static uint _small_block_max_byte_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >);
+  const static uint _small_block_max_word_size = _small_block_max_byte_size / BytesPerWord;
+  STATIC_ASSERT(_small_block_max_word_size * BytesPerWord == _small_block_max_byte_size);
+
   // Note: this corresponds to the imposed minimum allocation size, see SpaceManager::get_allocation_word_size()
-  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
+  const static uint _small_block_min_byte_size = sizeof(Metablock);
+  const static uint _small_block_min_word_size = _small_block_min_byte_size / BytesPerWord;
+  STATIC_ASSERT(_small_block_min_word_size * BytesPerWord == _small_block_min_byte_size);
 
 private:
-  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
+  FreeList<Metablock> _small_lists[_small_block_max_word_size - _small_block_min_word_size];
 
   FreeList<Metablock>& list_at(size_t word_size) {
-    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
-    return _small_lists[word_size - _small_block_min_size];
+    assert(word_size >= _small_block_min_word_size, "There are no metaspace objects less than %u words", _small_block_min_word_size);
+    return _small_lists[word_size - _small_block_min_word_size];
   }
 
 public:
   SmallBlocks() {
-    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
-      uint k = i - _small_block_min_size;
+    for (uint i = _small_block_min_word_size; i < _small_block_max_word_size; i++) {
+      uint k = i - _small_block_min_word_size;
       _small_lists[k].set_size(i);
     }
   }
@@ -62,8 +68,10 @@
   // Returns the total number of all blocks across all block sizes.
   uintx total_num_blocks() const;
 
-  static uint small_block_max_size() { return _small_block_max_size; }
-  static uint small_block_min_size() { return _small_block_min_size; }
+  static uint small_block_max_byte_size() { return _small_block_max_byte_size; }
+  static uint small_block_max_word_size() { return _small_block_max_word_size; }
+  static uint small_block_min_byte_size() { return _small_block_min_byte_size; }
+  static uint small_block_min_word_size() { return _small_block_min_word_size; }
 
   MetaWord* get_block(size_t word_size) {
     if (list_at(word_size).count() > 0) {
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,501 +27,424 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/internStat.hpp"
 #include "memory/metaspace/metachunk.hpp"
 #include "memory/metaspace/metaDebug.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/smallBlocks.hpp"
 #include "memory/metaspace/spaceManager.hpp"
 #include "memory/metaspace/virtualSpaceList.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/init.hpp"
 #include "services/memoryService.hpp"
+#include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 namespace metaspace {
 
-#define assert_counter(expected_value, real_value, msg) \
-  assert( (expected_value) == (real_value),             \
-         "Counter mismatch (%s): expected " SIZE_FORMAT \
-         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
-         real_value);
-
-// SpaceManager methods
+// Given a net allocation word size, return the raw word size
+// we need to actually allocate in order to:
+// 1) be able to deallocate the allocation - deallocated blocks are stored either in SmallBlocks
+//    (an array of short lists) or, beyond a certain size, in a dictionary tree.
+//    For that to work the allocated block must be at least three words.
+// 2) be aligned to sizeof(void*)
 
-size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
-  size_t chunk_sizes[] = {
-      specialized_chunk_size(is_class_space),
-      small_chunk_size(is_class_space),
-      medium_chunk_size(is_class_space)
-  };
+// Note: not marked static, so it is externally visible for gtests.
+size_t get_raw_allocation_word_size(size_t net_word_size) {
 
-  // Adjust up to one of the fixed chunk sizes ...
-  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
-    if (requested <= chunk_sizes[i]) {
-      return chunk_sizes[i];
-    }
-  }
+  size_t byte_size = net_word_size * BytesPerWord;
+  byte_size = MAX2(byte_size, (size_t)SmallBlocks::small_block_min_byte_size());
+  byte_size = align_up(byte_size, Metachunk::allocation_alignment_bytes);
 
-  // ... or return the size as a humongous chunk.
-  return requested;
-}
+  size_t word_size = byte_size / BytesPerWord;
+  assert(word_size * BytesPerWord == byte_size, "Sanity");
 
-size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
-  return adjust_initial_chunk_size(requested, is_class());
+  return word_size;
+
 }
 
-size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
-  size_t requested;
+static const size_t highest_possible_delta_between_raw_and_net_size = get_raw_allocation_word_size(1) - 1;
+
+// The inverse function to get_raw_allocation_word_size: Given a raw size, return the max net word size
+// fitting into it.
+static size_t get_net_allocation_word_size(size_t raw_word_size) {
 
-  if (is_class()) {
-    switch (type) {
-    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
-    case Metaspace::UnsafeAnonymousMetaspaceType:   requested = ClassSpecializedChunk; break;
-    case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
-    default:                                        requested = ClassSmallChunk; break;
-    }
-  } else {
-    switch (type) {
-    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
-    case Metaspace::UnsafeAnonymousMetaspaceType:   requested = SpecializedChunk; break;
-    case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
-    default:                                        requested = SmallChunk; break;
-    }
+  size_t byte_size = raw_word_size * BytesPerWord;
+  byte_size = align_down(byte_size, Metachunk::allocation_alignment_bytes);
+  if (byte_size < SmallBlocks::small_block_min_byte_size()) {
+    return 0;
   }
-
-  // Adjust to one of the fixed chunk sizes (unless humongous)
-  const size_t adjusted = adjust_initial_chunk_size(requested);
+  return byte_size / BytesPerWord;
 
-  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
-         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
-
-  return adjusted;
 }
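+
+// A worked example for the two helpers above, assuming a 64-bit VM (BytesPerWord == 8),
+// a minimal deallocatable block of three words (SmallBlocks::small_block_min_byte_size() == 24)
+// and pointer-size alignment (Metachunk::allocation_alignment_bytes == 8):
+//   get_raw_allocation_word_size(1) == 3   (8 bytes -> MAX2(8, 24) -> 24 bytes -> 3 words)
+//   get_net_allocation_word_size(3) == 3   (24 bytes, already aligned and >= the minimum)
+// which also makes highest_possible_delta_between_raw_and_net_size == 3 - 1 == 2.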
 
-void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
-
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
-        num_chunks_by_type(i), chunk_size_name(i));
-  }
+// Given a requested word size, will allocate a chunk large enough to at least fit that
+// size, but may be larger according to internal heuristics.
+//
+// On success, it will replace the current chunk with the newly allocated one, which will
+// become the current chunk. The old current chunk should be retired beforehand.
+//
+// May fail if a new chunk cannot be allocated. In that case the current chunk remains
+// unchanged and false is returned.
+bool SpaceManager::allocate_new_current_chunk(size_t requested_word_size) {
 
-  chunk_manager()->locked_print_free_chunks(st);
-}
-
-size_t SpaceManager::calc_chunk_size(size_t word_size) {
-
-  // Decide between a small chunk and a medium chunk.  Up to
-  // _small_chunk_limit small chunks can be allocated.
-  // After that a medium chunk is preferred.
-  size_t chunk_word_size;
+  assert_lock_strong(lock());
 
-  // Special case for unsafe anonymous metadata space.
-  // UnsafeAnonymous metadata space is usually small since it is used for
-  // class loader data's whose life cycle is governed by one class such as an
-  // unsafe anonymous class.  The majority within 1K - 2K range and
-  // rarely about 4K (64-bits JVM).
-  // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
-  // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
-  // reduces space waste from 60+% to around 30%.
-  if ((_space_type == Metaspace::UnsafeAnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
-      _mdtype == Metaspace::NonClassType &&
-      num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
-      word_size + Metachunk::overhead() <= SpecializedChunk) {
-    return SpecializedChunk;
-  }
+  guarantee(requested_word_size <= chklvl::MAX_CHUNK_WORD_SIZE,
+            "Requested size too large (" SIZE_FORMAT ") - max allowed size per allocation is " SIZE_FORMAT ".",
+            requested_word_size, chklvl::MAX_CHUNK_WORD_SIZE);
 
-  if (num_chunks_by_type(MediumIndex) == 0 &&
-      num_chunks_by_type(SmallIndex) < small_chunk_limit) {
-    chunk_word_size = (size_t) small_chunk_size();
-    if (word_size + Metachunk::overhead() > small_chunk_size()) {
-      chunk_word_size = medium_chunk_size();
-    }
-  } else {
-    chunk_word_size = medium_chunk_size();
+  // If we have a current chunk, it should have been retired (almost empty) beforehand.
+  // See: retire_current_chunk().
+  assert(current_chunk() == NULL || current_chunk()->free_below_committed_words() <= 10, "Must retire chunk beforehand");
+
+  const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size);
+  chklvl_t pref_level = _chunk_alloc_sequence->get_next_chunk_level(_chunks.size());
+
+  if (pref_level > min_level) {
+    pref_level = min_level;
   }
 
-  // Might still need a humongous chunk.  Enforce
-  // humongous allocations sizes to be aligned up to
-  // the smallest chunk size.
-  size_t if_humongous_sized_chunk =
-    align_up(word_size + Metachunk::overhead(),
-                  smallest_chunk_size());
-  chunk_word_size =
-    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
-
-  assert(!SpaceManager::is_humongous(word_size) ||
-         chunk_word_size == if_humongous_sized_chunk,
-         "Size calculation is wrong, word_size " SIZE_FORMAT
-         " chunk_word_size " SIZE_FORMAT,
-         word_size, chunk_word_size);
-  Log(gc, metaspace, alloc) log;
-  if (log.is_trace() && SpaceManager::is_humongous(word_size)) {
-    log.trace("Metadata humongous allocation:");
-    log.trace("  word_size " PTR_FORMAT, word_size);
-    log.trace("  chunk_word_size " PTR_FORMAT, chunk_word_size);
-    log.trace("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
-  }
-  return chunk_word_size;
-}
-
-void SpaceManager::track_metaspace_memory_usage() {
-  if (is_init_completed()) {
-    if (is_class()) {
-      MemoryService::track_compressed_class_memory_usage();
-    }
-    MemoryService::track_metaspace_memory_usage();
-  }
-}
-
-MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
-  assert_lock_strong(_lock);
-  assert(vs_list()->current_virtual_space() != NULL,
-         "Should have been set");
-  assert(current_chunk() == NULL ||
-         current_chunk()->allocate(word_size) == NULL,
-         "Don't need to expand");
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-
-  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
-    size_t words_left = 0;
-    size_t words_used = 0;
-    if (current_chunk() != NULL) {
-      words_left = current_chunk()->free_word_size();
-      words_used = current_chunk()->used_word_size();
-    }
-    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
-                                       word_size, words_used, words_left);
+  Metachunk* c = _chunk_manager->get_chunk(min_level, pref_level);
+  if (c == NULL) {
+    log_debug(metaspace)("SpaceManager %s: failed to allocate new chunk for requested word size " SIZE_FORMAT ".",
+                         _name, requested_word_size);
+    return false;
   }
 
-  // Get another chunk
-  size_t chunk_word_size = calc_chunk_size(word_size);
-  Metachunk* next = get_new_chunk(chunk_word_size);
+  assert(c->is_in_use(), "Wrong chunk state.");
+  assert(c->level() <= min_level && c->level() >= pref_level, "Sanity");
 
-  MetaWord* mem = NULL;
+  _chunks.add(c);
 
-  // If a chunk was available, add it to the in-use chunk list
-  // and do an allocation from it.
-  if (next != NULL) {
-    // Add to this manager's list of chunks in use.
-    // If the new chunk is humongous, it was created to serve a single large allocation. In that
-    // case it usually makes no sense to make it the current chunk, since the next allocation would
-    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
-    // good chunk which could be used for more normal allocations.
-    bool make_current = true;
-    if (next->get_chunk_type() == HumongousIndex &&
-        current_chunk() != NULL) {
-      make_current = false;
-    }
-    add_chunk(next, make_current);
-    mem = next->allocate(word_size);
-  }
+  log_debug(metaspace)("SpaceManager %s: allocated new chunk " METACHUNK_FORMAT " for requested word size " SIZE_FORMAT ".",
+                       _name, METACHUNK_FORMAT_ARGS(c), requested_word_size);
 
-  // Track metaspace memory usage statistic.
-  track_metaspace_memory_usage();
+  return true;
 
-  return mem;
 }
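
A toy sketch of the level clamp above, assuming, as the clamp and the assert suggest, that numerically higher chunk levels denote smaller chunks: min_level is then the smallest chunk still fitting the request, and the preferred level may only be corrected towards larger chunks. All constants are hypothetical stand-ins for the real chklvl geometry:

#include <cassert>
#include <cstddef>

typedef int chklvl_t;

// Assumed geometry: level 0 is the (largest) root chunk, each +1 halves the size.
static const chklvl_t ROOT_LEVEL = 0;
static const chklvl_t HIGHEST_LEVEL = 13;
static const size_t ROOT_CHUNK_WORD_SIZE = (size_t)1 << 22;

static size_t word_size_for_level(chklvl_t l) {
  return ROOT_CHUNK_WORD_SIZE >> l;
}

// Equivalent of chklvl::level_fitting_word_size(): the numerically highest
// (i.e. smallest-chunk) level whose chunk still fits the request.
static chklvl_t level_fitting(size_t word_size) {
  chklvl_t l = HIGHEST_LEVEL;
  while (l > ROOT_LEVEL && word_size_for_level(l) < word_size) {
    l--;
  }
  return l;
}

int main() {
  const size_t request = 100;
  chklvl_t min_level = level_fitting(request);
  // Pretend the alloc sequence prefers an even smaller chunk than would fit:
  chklvl_t pref_level = min_level + 1;
  if (pref_level > min_level) {
    pref_level = min_level;  // the clamp from allocate_new_current_chunk()
  }
  assert(pref_level <= min_level);
  assert(word_size_for_level(min_level) >= request);
  return 0;
}
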
 
-void SpaceManager::print_on(outputStream* st) const {
-  SpaceManagerStatistics stat;
-  add_to_statistics(&stat); // will lock _lock.
-  stat.print_on(st, 1*K, false);
+void SpaceManager::create_block_freelist() {
+  assert(_block_freelist == NULL, "Only call once");
+  _block_freelist = new BlockFreelist();
 }
 
-SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
-                           Metaspace::MetaspaceType space_type,//
-                           Mutex* lock) :
-  _lock(lock),
-  _mdtype(mdtype),
-  _space_type(space_type),
-  _chunk_list(NULL),
-  _current_chunk(NULL),
-  _overhead_words(0),
-  _capacity_words(0),
-  _used_words(0),
-  _block_freelists(NULL) {
-  Metadebug::init_allocation_fail_alot_count();
-  memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
-  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
+void SpaceManager::add_allocation_to_block_freelist(MetaWord* p, size_t word_size) {
+  if (_block_freelist == NULL) {
+    _block_freelist = new BlockFreelist(); // Create only on demand
+  }
+  _block_freelist->return_block(p, word_size);
 }
 
-void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
-
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  _capacity_words += new_chunk->word_size();
-  _overhead_words += Metachunk::overhead();
-  DEBUG_ONLY(new_chunk->verify());
-  _num_chunks_by_type[new_chunk->get_chunk_type()] ++;
-
-  // Adjust global counters:
-  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
-  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
-}
-
-void SpaceManager::account_for_allocation(size_t words) {
-  // Note: we should be locked with the ClassloaderData-specific metaspace lock.
-  // We may or may not be locked with the global metaspace expansion lock.
-  assert_lock_strong(lock());
-
-  // Add to the per SpaceManager totals. This can be done non-atomically.
-  _used_words += words;
-
-  // Adjust global counters. This will be done atomically.
-  MetaspaceUtils::inc_used(mdtype(), words);
-}
-
-void SpaceManager::account_for_spacemanager_death() {
-
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
-  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
-  MetaspaceUtils::dec_used(mdtype(), _used_words);
+SpaceManager::SpaceManager(ChunkManager* chunk_manager,
+             const ChunkAllocSequence* alloc_sequence,
+             Mutex* lock,
+             SizeAtomicCounter* total_used_words_counter,
+             const char* name)
+: _lock(lock),
+  _chunk_manager(chunk_manager),
+  _chunk_alloc_sequence(alloc_sequence),
+  _chunks(),
+  _block_freelist(NULL),
+  _total_used_words_counter(total_used_words_counter),
+  _name(name)
+{
 }
 
 SpaceManager::~SpaceManager() {
 
-  // This call this->_lock which can't be done while holding MetaspaceExpand_lock
-  DEBUG_ONLY(verify_metrics());
+  MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag);
+  Metachunk* c = _chunks.first();
+  Metachunk* c2 = NULL;
+  while (c != NULL) {
+    // c may become invalid. Take care while iterating.
+    c2 = c->next();
+    _total_used_words_counter->decrement_by(c->used_words());
+    _chunks.remove(c);
+    _chunk_manager->return_chunk(c);
+    c = c2;
+  }
+
+  DEBUG_ONLY(chunk_manager()->verify(true);)
+
+  delete _block_freelist;
+
+}
 
-  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+// The remaining committed free space in the current chunk is chopped up and stored in the block
+// free list for later use. As a result, the current chunk will remain current but completely
+// used up. This is a preparation for calling allocate_new_current_chunk().
+void SpaceManager::retire_current_chunk() {
+  assert_lock_strong(lock());
+
+  Metachunk* c = current_chunk();
+  assert(c != NULL, "Sanity");
 
-  account_for_spacemanager_death();
+  log_debug(metaspace)("SpaceManager %s: retiring chunk " METACHUNK_FULL_FORMAT ".",
+                       _name, METACHUNK_FULL_FORMAT_ARGS(c));
+
+  // Side note:
+  // In theory it could happen that we are asked to retire a completely empty chunk. This may be the
+  // result of rolled-back allocations (see the rollback in deallocate_locked()) and a lot of luck.
+  // But since these cases should be exceedingly rare, we do not handle them specially, in order to
+  // keep the code simple.
 
-  Log(gc, metaspace, freelist) log;
-  if (log.is_trace()) {
-    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
-    ResourceMark rm;
-    LogStream ls(log.trace());
-    locked_print_chunks_in_use_on(&ls);
-    if (block_freelists() != NULL) {
-      block_freelists()->print_on(&ls);
+  size_t raw_remaining_words = c->free_below_committed_words();
+  size_t net_remaining_words = get_net_allocation_word_size(raw_remaining_words);
+  if (net_remaining_words > 0) {
+    bool did_hit_limit = false;
+    MetaWord* ptr = c->allocate(net_remaining_words, &did_hit_limit);
+    assert(ptr != NULL && did_hit_limit == false, "Should have worked");
+    add_allocation_to_block_freelist(ptr, net_remaining_words);
+    _total_used_words_counter->increment_by(net_remaining_words);
+  }
+
+  // After this operation: the current chunk should have (almost) no free committed space left.
+  assert(current_chunk()->free_below_committed_words() <= highest_possible_delta_between_raw_and_net_size,
+         "Chunk retiring did not work (current chunk " METACHUNK_FULL_FORMAT ").",
+         METACHUNK_FULL_FORMAT_ARGS(current_chunk()));
+
+  DEBUG_ONLY(verify_locked();)
+
+}
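
Worked example, using the assumed constants of the sizing sketch further up: get_raw_allocation_word_size(1) is 2 words, so highest_possible_delta_between_raw_and_net_size is 1 word. After retiring, at most one committed free word - too small for any net allocation - may remain in the current chunk, which is what the assert above checks.
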
+
+// Allocate memory from Metaspace.
+// 1) Attempt to allocate from the dictionary of deallocated blocks.
+// 2) Attempt to allocate from the current chunk.
+// 3) Attempt to enlarge the current chunk in place if it is too small.
+// 4) Attempt to get a new chunk and allocate from that chunk.
+// At any point, if we hit a commit limit, we return NULL.
+MetaWord* SpaceManager::allocate(size_t requested_word_size) {
+
+  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
+
+  const size_t raw_word_size = get_raw_allocation_word_size(requested_word_size);
+
+  log_trace(metaspace)("SpaceManager %s: requested " SIZE_FORMAT " words, "
+                       "raw word size: " SIZE_FORMAT ".",
+                       _name, requested_word_size, raw_word_size);
+
+  MetaWord* p = NULL;
+
+  bool did_hit_limit = false;
+
+  // Allocate first chunk if needed.
+  if (current_chunk() == NULL) {
+    if (allocate_new_current_chunk(raw_word_size) == false) {
+      did_hit_limit = true;
+    } else {
+      assert(current_chunk() != NULL && current_chunk()->free_words() >= raw_word_size, "Sanity");
     }
   }
 
-  // Add all the chunks in use by this space manager
-  // to the global list of free chunks.
-
-  // Follow each list of chunks-in-use and add them to the
-  // free lists.  Each list is NULL terminated.
-  chunk_manager()->return_chunk_list(chunk_list());
-#ifdef ASSERT
-  _chunk_list = NULL;
-  _current_chunk = NULL;
-#endif
-
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    chunk_manager()->locked_verify(true);
-  END_EVERY_NTH
-#endif
-
-  if (_block_freelists != NULL) {
-    delete _block_freelists;
-  }
-}
-
-void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
-  assert_lock_strong(lock());
-  // Allocations and deallocations are in raw_word_size
-  size_t raw_word_size = get_allocation_word_size(word_size);
-  // Lazily create a block_freelist
-  if (block_freelists() == NULL) {
-    _block_freelists = new BlockFreelist();
-  }
-  block_freelists()->return_block(p, raw_word_size);
-  DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs)));
-}
-
-// Adds a chunk to the list of chunks in use.
-void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
-
-  assert_lock_strong(_lock);
-  assert(new_chunk != NULL, "Should not be NULL");
-  assert(new_chunk->next() == NULL, "Should not be on a list");
-
-  new_chunk->reset_empty();
-
-  // Find the correct list and and set the current
-  // chunk for that list.
-  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
-
-  if (make_current) {
-    // If we are to make the chunk current, retire the old current chunk and replace
-    // it with the new chunk.
-    retire_current_chunk();
-    set_current_chunk(new_chunk);
-  }
-
-  // Add the new chunk at the head of its respective chunk list.
-  new_chunk->set_next(_chunk_list);
-  _chunk_list = new_chunk;
-
-  // Adjust counters.
-  account_for_new_chunk(new_chunk);
-
-  assert(new_chunk->is_empty(), "Not ready for reuse");
-  Log(gc, metaspace, freelist) log;
-  if (log.is_trace()) {
-    log.trace("SpaceManager::added chunk: ");
-    ResourceMark rm;
-    LogStream ls(log.trace());
-    new_chunk->print_on(&ls);
-    chunk_manager()->locked_print_free_chunks(&ls);
-  }
-}
-
-void SpaceManager::retire_current_chunk() {
-  if (current_chunk() != NULL) {
-    size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= SmallBlocks::small_block_min_size()) {
-      MetaWord* ptr = current_chunk()->allocate(remaining_words);
-      deallocate(ptr, remaining_words);
-      account_for_allocation(remaining_words);
-    }
-  }
-}
-
-Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
-  // Get a chunk from the chunk freelist
-  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
-
-  if (next == NULL) {
-    next = vs_list()->get_new_chunk(chunk_word_size,
-                                    medium_chunk_bunch());
-  }
-
-  Log(gc, metaspace, alloc) log;
-  if (log.is_trace() && next != NULL &&
-      SpaceManager::is_humongous(next->word_size())) {
-    log.trace("  new humongous chunk word size " PTR_FORMAT, next->word_size());
-  }
-
-  return next;
-}
-
-MetaWord* SpaceManager::allocate(size_t word_size) {
-  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t raw_word_size = get_allocation_word_size(word_size);
-  BlockFreelist* fl =  block_freelists();
-  MetaWord* p = NULL;
+  // 1) Attempt to allocate from the dictionary of deallocated blocks.
 
   // Allocation from the dictionary is expensive in the sense that
   // the dictionary has to be searched for a size.  Don't allocate
   // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
   // for allocations.  Do some profiling.  JJJ
-  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
-    p = fl->get_block(raw_word_size);
+  if (_block_freelist != NULL && _block_freelist->total_size() > Settings::allocation_from_dictionary_limit()) {
+    p = _block_freelist->get_block(raw_word_size);
+
     if (p != NULL) {
-      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
+      DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();)
+      log_trace(metaspace)("SpaceManager %s: .. taken from freelist.", _name);
+      // Note: space in the freeblock dictionary counts as used (see retire_current_chunk()),
+      // which means we must not increase the used counter again when allocating from the
+      // dictionary. Therefore we return here.
+      return p;
     }
+
   }
-  if (p == NULL) {
-    p = allocate_work(raw_word_size);
+
+  // 2) Failing that, attempt to allocate from the current chunk. If we hit commit limit, return NULL.
+  if (p == NULL && !did_hit_limit) {
+    p = current_chunk()->allocate(raw_word_size, &did_hit_limit);
+    if (p != NULL) {
+      log_trace(metaspace)("SpaceManager %s: .. taken from current chunk.", _name);
+    }
   }
 
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    verify_metrics_locked();
-  END_EVERY_NTH
-#endif
+  // 3) Failing that because the remaining chunk space is too small for the requested size
+  //    (and not because we hit the commit limit), attempt to enlarge the chunk in place.
+  if (p == NULL && !did_hit_limit) {
 
-  return p;
-}
+    // Since we did not hit the commit limit, the current chunk must have been too small.
+    assert(current_chunk()->free_words() < raw_word_size, "Sanity");
+
+    DEBUG_ONLY(InternalStats::inc_num_allocs_failed_chunk_too_small();)
 
-// Returns the address of spaced allocated for "word_size".
-// This methods does not know about blocks (Metablocks)
-MetaWord* SpaceManager::allocate_work(size_t word_size) {
-  assert_lock_strong(lock());
-#ifdef ASSERT
-  if (Metadebug::test_metadata_failure()) {
-    return NULL;
-  }
-#endif
-  // Is there space in the current chunk?
-  MetaWord* result = NULL;
+    // Under certain conditions we can just attempt to enlarge the chunk - fusing it with its follower
+    // chunk to produce a chunk double the size (level decreased by 1).
+    // 0) only if it is not switched off
+    // 1) obviously, this only works for non-root chunks
+    // 2) ... which are leader of their buddy pair.
+    // 3) only if the requested allocation would fit into a thus enlarged chunk
+    // 4) do not grow memory faster than what the chunk allocation strategy would allow
+    // 5) as a safety feature, only below a certain limit
+    if (Settings::enlarge_chunks_in_place() &&              // 0
+        current_chunk()->is_root_chunk() == false &&        // 1
+        current_chunk()->is_leader() &&                     // 2
+        current_chunk()->word_size() + current_chunk()->free_words() >= requested_word_size &&      // 3
+        _chunk_alloc_sequence->get_next_chunk_level(_chunks.size()) <= current_chunk()->level() &&  // 4
+        current_chunk()->word_size() <= Settings::enlarge_chunks_in_place_max_word_size())          // 5
+    {
 
-  if (current_chunk() != NULL) {
-    result = current_chunk()->allocate(word_size);
-  }
+      if (_chunk_manager->attempt_enlarge_chunk(current_chunk())) {
+
+        // Re-attempt allocation.
+        p = current_chunk()->allocate(raw_word_size, &did_hit_limit);
 
-  if (result == NULL) {
-    result = grow_and_allocate(word_size);
-  }
-
-  if (result != NULL) {
-    account_for_allocation(word_size);
+        if (p != NULL) {
+          DEBUG_ONLY(InternalStats::inc_num_chunk_enlarged();)
+          log_trace(metaspace)("SpaceManager %s: .. taken from current chunk (enlarged chunk).", _name);
+        }
+      }
+    }
   }
 
-  return result;
-}
+  // 4) Failing that, attempt to get a new chunk and allocate from that chunk. Again, we may hit a commit
+  //    limit, in which case we return NULL.
+  if (p == NULL && !did_hit_limit) {
+
+    // Since we did not hit the commit limit, the current chunk must have been too small.
+    assert(current_chunk()->free_words() < raw_word_size, "Sanity");
+
+    // Before we allocate a new chunk we need to retire the old chunk, which is too small to serve our request
+    // but may still have free committed words.
+    retire_current_chunk();
 
-void SpaceManager::verify() {
-  Metachunk* curr = chunk_list();
-  while (curr != NULL) {
-    DEBUG_ONLY(do_verify_chunk(curr);)
-    assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
-    curr = curr->next();
+    DEBUG_ONLY(InternalStats::inc_num_chunks_retired();)
+
+    // Allocate a new chunk.
+    if (allocate_new_current_chunk(raw_word_size) == false) {
+      did_hit_limit = true;
+    } else {
+      assert(current_chunk() != NULL && current_chunk()->free_words() >= raw_word_size, "Sanity");
+      p = current_chunk()->allocate(raw_word_size, &did_hit_limit);
+      log_trace(metaspace)("SpaceManager %s: .. allocated new chunk " CHKLVL_FORMAT " and taken from that.",
+                           _name, current_chunk()->level());
+    }
+
   }
-}
+
+  assert(p != NULL || did_hit_limit, "Sanity");
+
+  SOMETIMES(verify_locked();)
 
-void SpaceManager::verify_chunk_size(Metachunk* chunk) {
-  assert(is_humongous(chunk->word_size()) ||
-         chunk->word_size() == medium_chunk_size() ||
-         chunk->word_size() == small_chunk_size() ||
-         chunk->word_size() == specialized_chunk_size(),
-         "Chunk size is wrong");
-  return;
+  if (p == NULL) {
+    DEBUG_ONLY(InternalStats::inc_num_allocs_failed_limit();)
+  } else {
+    DEBUG_ONLY(InternalStats::inc_num_allocs();)
+    _total_used_words_counter->increment_by(raw_word_size);
+  }
+
+  log_trace(metaspace)("SpaceManager %s: returned " PTR_FORMAT ".",
+                       _name, p2i(p));
+
+  return p;
+
 }
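
A condensed, runnable model of the did_hit_limit protocol driving the cascade above: a failing step either genuinely lacked space (so the next step is tried) or hit the commit limit (so all further steps are skipped and NULL is returned). The step contents are stand-ins, not the real allocation paths:

#include <cassert>
#include <cstddef>

typedef unsigned long long MetaWord; // stand-in

struct Step {
  MetaWord* result;     // what this step would return
  bool sets_limit;      // models this step setting did_hit_limit
};

static MetaWord* cascade(const Step steps[], int n, bool* did_hit_limit) {
  MetaWord* p = NULL;
  for (int i = 0; i < n && p == NULL && !*did_hit_limit; i++) {
    p = steps[i].result;
    if (steps[i].sets_limit) {
      *did_hit_limit = true;
    }
  }
  return p;
}

int main() {
  static MetaWord block;
  bool limit = false;
  // Step 1 finds nothing, step 2 hits the commit limit: step 3 is never consulted.
  const Step s1[] = { {NULL, false}, {NULL, true}, {&block, false} };
  assert(cascade(s1, 3, &limit) == NULL && limit);
  // No limit hit: the cascade falls through until a step succeeds, satisfying
  // the invariant assert(p != NULL || did_hit_limit) above.
  limit = false;
  const Step s2[] = { {NULL, false}, {NULL, false}, {&block, false} };
  assert(cascade(s2, 3, &limit) == &block && !limit);
  return 0;
}
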
 
-void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
+// Prematurely returns a metaspace allocation to the _block_freelists
+// because it is not needed anymore (requires CLD lock to be active).
+void SpaceManager::deallocate_locked(MetaWord* p, size_t word_size) {
   assert_lock_strong(lock());
-  Metachunk* chunk = chunk_list();
-  while (chunk != NULL) {
-    UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
-    chunk_stat.add_num(1);
-    chunk_stat.add_cap(chunk->word_size());
-    chunk_stat.add_overhead(Metachunk::overhead());
-    chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
-    if (chunk != current_chunk()) {
-      chunk_stat.add_waste(chunk->free_word_size());
-    } else {
-      chunk_stat.add_free(chunk->free_word_size());
-    }
-    chunk = chunk->next();
+
+  // Allocations and deallocations are in raw_word_size
+  size_t raw_word_size = get_raw_allocation_word_size(word_size);
+
+  log_debug(metaspace)("SpaceManager %s: deallocating " PTR_FORMAT
+                       ", word size: " SIZE_FORMAT ", raw size: " SIZE_FORMAT ".",
+                       _name, p2i(p), word_size, raw_word_size);
+
+  assert(current_chunk() != NULL, "SpaceManager is empty.");
+
+  assert(is_valid_area(p, word_size),
+         "Pointer range not part of this SpaceManager and cannot be deallocated: (" PTR_FORMAT ".." PTR_FORMAT ").",
+         p2i(p), p2i(p + word_size));
+
+  // If this allocation has just been allocated from the current chunk, it may still be on the top of the
+  // current chunk. In that case, just roll back the allocation.
+  if (current_chunk()->attempt_rollback_allocation(p, raw_word_size)) {
+    log_trace(metaspace)("SpaceManager %s: ... rollback succeeded.", _name);
+    return;
   }
-  if (block_freelists() != NULL) {
-    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
-  }
+
+  add_allocation_to_block_freelist(p, raw_word_size);
+
+  DEBUG_ONLY(verify_locked();)
+
 }
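
attempt_rollback_allocation() amounts to un-bumping a pointer-bump allocator when the block being freed is still the topmost allocation in the chunk; a minimal sketch of that idea (the real Metachunk tracks its top in words and is protected by the same CLD lock):

#include <cassert>
#include <cstddef>

typedef unsigned long long MetaWord; // stand-in

// Minimal pointer-bump arena modeling the used region of a Metachunk.
struct Arena {
  MetaWord* base;
  MetaWord* top;   // next free word

  MetaWord* allocate(size_t words) {
    MetaWord* p = top;
    top += words;
    return p;
  }

  // Succeeds only if [p, p + words) is the most recent allocation.
  bool attempt_rollback(MetaWord* p, size_t words) {
    if (p + words == top) {
      top = p;       // just un-bump; nothing enters the block freelist
      return true;
    }
    return false;    // caller must hand the block to the block freelist instead
  }
};

int main() {
  MetaWord storage[64];
  Arena a = { storage, storage };
  MetaWord* p1 = a.allocate(10);
  MetaWord* p2 = a.allocate(5);
  assert(!a.attempt_rollback(p1, 10)); // no longer on top: cannot roll back
  assert(a.attempt_rollback(p2, 5));   // topmost allocation: rolled back
  assert(a.top == p1 + 10);
  return 0;
}
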
 
-void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
+// Prematurely returns a metaspace allocation to the _block_freelists because it is not
+// needed anymore.
+void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
+  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
+  deallocate_locked(p, word_size);
+}
+
+// Update statistics. This walks all in-use chunks.
+void SpaceManager::add_to_statistics(sm_stats_t* out) const {
+
   MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  add_to_statistics_locked(out);
+
+  for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
+    in_use_chunk_stats_t& ucs = out->stats[c->level()];
+    ucs.num ++;
+    ucs.word_size += c->word_size();
+    ucs.committed_words += c->committed_words();
+    ucs.used_words += c->used_words();
+    // Note: for free and waste, we only count what's committed.
+    if (c == current_chunk()) {
+      ucs.free_words += c->free_below_committed_words();
+    } else {
+      ucs.waste_words += c->free_below_committed_words();
+    }
+  }
+
+  if (block_freelist() != NULL) {
+    out->free_blocks_num += block_freelist()->num_blocks();
+    out->free_blocks_word_size += block_freelist()->total_size();
+  }
+
+  SOMETIMES(out->verify();)
 }
 
 #ifdef ASSERT
-void SpaceManager::verify_metrics_locked() const {
+
+void SpaceManager::verify_locked() const {
+
   assert_lock_strong(lock());
 
-  SpaceManagerStatistics stat;
-  add_to_statistics_locked(&stat);
+  assert(_chunk_alloc_sequence != NULL && _chunk_manager != NULL, "Sanity");
 
-  UsedChunksStatistics chunk_stats = stat.totals();
+  _chunks.verify();
+
+}
 
-  DEBUG_ONLY(chunk_stats.check_sanity());
+void SpaceManager::verify() const {
 
-  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
-  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
-  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
+  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
+  verify_locked();
+
 }
 
-void SpaceManager::verify_metrics() const {
-  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  verify_metrics_locked();
+// Returns true if the area indicated by pointer and size has actually been allocated
+// from this space manager.
+bool SpaceManager::is_valid_area(MetaWord* p, size_t word_size) const {
+  assert(p != NULL && word_size > 0, "Sanity");
+  for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
+    if (c->is_valid_pointer(p)) {
+      assert(c->is_valid_pointer(p + word_size - 1), "Range partly oob");
+      return true;
+    }
+  }
+  return false;
 }
+
 #endif // ASSERT
 
 
--- a/src/hotspot/share/memory/metaspace/spaceManager.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,206 +29,115 @@
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspace/blockFreelist.hpp"
-#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/chunkAllocSequence.hpp"
+#include "memory/metaspace/chunkManager.hpp"
 #include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/metaspaceStatistics.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
 
 class outputStream;
 class Mutex;
 
 namespace metaspace {
 
-//  SpaceManager - used by Metaspace to handle allocations
+struct sm_stats_t;
+
+// The SpaceManager:
+// - keeps a list of chunks-in-use by the class loader, as well as a current chunk used
+//   to allocate from
+// - keeps a dictionary of free MetaBlocks. Those can be remnants of a retired chunk or
+//   allocations which were not needed anymore for some reason (e.g. releasing half-allocated
+//   structures when class loading fails)
+
 class SpaceManager : public CHeapObj<mtClass> {
-  friend class ::ClassLoaderMetaspace;
-  friend class Metadebug;
 
- private:
-
-  // protects allocations
+  // Lock handed down from the associated ClassLoaderData.
+  //  Protects allocations from this space.
   Mutex* const _lock;
 
-  // Type of metadata allocated.
-  const Metaspace::MetadataType   _mdtype;
+  // The chunk manager to allocate chunks from.
+  ChunkManager* const _chunk_manager;
 
-  // Type of metaspace
-  const Metaspace::MetaspaceType  _space_type;
+  // The chunk allocation strategy to use.
+  const ChunkAllocSequence* const _chunk_alloc_sequence;
 
   // List of chunks in use by this SpaceManager.  Allocations
-  // are done from the current chunk.  The list is used for deallocating
+  // are done from the current chunk. The list is used for deallocating
   // chunks when the SpaceManager is freed.
-  Metachunk* _chunk_list;
-  Metachunk* _current_chunk;
+  MetachunkList _chunks;
 
-  enum {
-
-    // Maximum number of small chunks to allocate to a SpaceManager
-    small_chunk_limit = 4,
+  Metachunk* current_chunk()              { return _chunks.first(); }
+  const Metachunk* current_chunk() const  { return _chunks.first(); }
 
-    // Maximum number of specialize chunks to allocate for anonymous and delegating
-    // metadata space to a SpaceManager
-    anon_and_delegating_metadata_specialize_chunk_limit = 4,
-
-    allocation_from_dictionary_limit = 4 * K
-
-  };
+  // Prematurely released metablocks.
+  BlockFreelist* _block_freelist;
 
-  // Some running counters, but lets keep their number small to not add to much to
-  // the per-classloader footprint.
-  // Note: capacity = used + free + waste + overhead. We do not keep running counters for
-  // free and waste. Their sum can be deduced from the three other values.
-  size_t _overhead_words;
-  size_t _capacity_words;
-  size_t _used_words;
-  uintx _num_chunks_by_type[NumberOfInUseLists];
+  // Points to an outside size counter which we increase/decrease when we allocate memory
+  // on behalf of a user or when we are destroyed.
+  SizeAtomicCounter* const _total_used_words_counter;
 
-  // Free lists of blocks are per SpaceManager since they
-  // are assumed to be in chunks in use by the SpaceManager
-  // and all chunks in use by a SpaceManager are freed when
-  // the class loader using the SpaceManager is collected.
-  BlockFreelist* _block_freelists;
+  const char* const _name;
 
- private:
-  // Accessors
-  Metachunk* chunk_list() const { return _chunk_list; }
-
-  BlockFreelist* block_freelists() const { return _block_freelists; }
+  Mutex* lock() const                           { return _lock; }
+  ChunkManager* chunk_manager() const           { return _chunk_manager; }
+  const ChunkAllocSequence* chunk_alloc_sequence() const    { return _chunk_alloc_sequence; }
 
-  Metaspace::MetadataType mdtype() { return _mdtype; }
-
-  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
-  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
+  BlockFreelist* block_freelist() const         { return _block_freelist; }
+  void create_block_freelist();
+  void add_allocation_to_block_freelist(MetaWord* p, size_t word_size);
 
-  Metachunk* current_chunk() const { return _current_chunk; }
-  void set_current_chunk(Metachunk* v) {
-    _current_chunk = v;
-  }
-
-  Metachunk* find_current_chunk(size_t word_size);
-
-  // Add chunk to the list of chunks in use
-  void add_chunk(Metachunk* v, bool make_current);
+  // The remaining committed free space in the current chunk is chopped up and stored in the block
+  // free list for later use. As a result, the current chunk will remain current but completely
+  // used up. This is a preparation for calling allocate_new_current_chunk().
   void retire_current_chunk();
 
-  Mutex* lock() const { return _lock; }
-
-  // Adds to the given statistic object. Expects to be locked with lock().
-  void add_to_statistics_locked(SpaceManagerStatistics* out) const;
+  // Given a requested word size, will allocate a chunk large enough to at least fit that
+  // size, but may be larger according to internal heuristics.
+  //
+  // On success, it will replace the current chunk with the newly allocated one, which will
+  // become the current chunk. The old current chunk should be retired beforehand.
+  //
+  // May fail if a new chunk cannot be allocated. In that case the current chunk remains
+  // unchanged and false is returned.
+  bool allocate_new_current_chunk(size_t requested_word_size);
 
-  // Verify internal counters against the current state. Expects to be locked with lock().
-  DEBUG_ONLY(void verify_metrics_locked() const;)
+  // Prematurely returns a metaspace allocation to the _block_freelists
+  // because it is not needed anymore (requires CLD lock to be active).
+  void deallocate_locked(MetaWord* p, size_t word_size);
+
+  // Returns true if the area indicated by pointer and size has actually been allocated
+  // from this space manager.
+  DEBUG_ONLY(bool is_valid_area(MetaWord* p, size_t word_size) const;)
 
- public:
-  SpaceManager(Metaspace::MetadataType mdtype,
-               Metaspace::MetaspaceType space_type,
-               Mutex* lock);
+public:
+
+  SpaceManager(ChunkManager* chunk_manager,
+               const ChunkAllocSequence* alloc_sequence,
+               Mutex* lock,
+               SizeAtomicCounter* total_used_words_counter,
+               const char* name);
+
   ~SpaceManager();
 
-  enum ChunkMultiples {
-    MediumChunkMultiple = 4
-  };
-
-  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
-  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
-  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
-
-  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
-
-  // Accessors
-  bool is_class() const { return _mdtype == Metaspace::ClassType; }
-
-  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
-  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
-  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
-
-  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
-
-  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
-
-  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
-
-  size_t capacity_words() const     { return _capacity_words; }
-  size_t used_words() const         { return _used_words; }
-  size_t overhead_words() const     { return _overhead_words; }
-
-  // Adjust local, global counters after a new chunk has been added.
-  void account_for_new_chunk(const Metachunk* new_chunk);
-
-  // Adjust local, global counters after space has been allocated from the current chunk.
-  void account_for_allocation(size_t words);
-
-  // Adjust global counters just before the SpaceManager dies, after all its chunks
-  // have been returned to the freelist.
-  void account_for_spacemanager_death();
-
-  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
-  // or return the unadjusted size if the requested size is humongous.
-  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
-  size_t adjust_initial_chunk_size(size_t requested) const;
-
-  // Get the initial chunks size for this metaspace type.
-  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
-
-  // Todo: remove this once we have counters by chunk type.
-  uintx num_chunks_by_type(ChunkIndex chunk_type) const       { return _num_chunks_by_type[chunk_type]; }
-
-  Metachunk* get_new_chunk(size_t chunk_word_size);
-
-  // Block allocation and deallocation.
-  // Allocates a block from the current chunk
+  // Allocate memory from Metaspace.
+  // 1) Attempt to allocate from the dictionary of deallocated blocks.
+  // 2) Attempt to allocate from the current chunk.
+  // 3) Attempt to enlarge the current chunk in place if it is too small.
+  // 4) Attempt to get a new chunk and allocate from that chunk.
+  // At any point, if we hit a commit limit, we return NULL.
   MetaWord* allocate(size_t word_size);
 
-  // Helper for allocations
-  MetaWord* allocate_work(size_t word_size);
-
-  // Returns a block to the per manager freelist
+  // Prematurely returns a metaspace allocation to the _block_freelists because it is not
+  // needed anymore.
   void deallocate(MetaWord* p, size_t word_size);
 
-  // Based on the allocation size and a minimum chunk size,
-  // returned chunk size (for expanding space for chunk allocation).
-  size_t calc_chunk_size(size_t allocation_word_size);
-
-  // Called when an allocation from the current chunk fails.
-  // Gets a new chunk (may require getting a new virtual space),
-  // and allocates from that chunk.
-  MetaWord* grow_and_allocate(size_t word_size);
-
-  // Notify memory usage to MemoryService.
-  void track_metaspace_memory_usage();
-
-  // debugging support.
-
-  void print_on(outputStream* st) const;
-  void locked_print_chunks_in_use_on(outputStream* st) const;
-
-  void verify();
-  void verify_chunk_size(Metachunk* chunk);
+  // Update statistics. This walks all in-use chunks.
+  void add_to_statistics(sm_stats_t* out) const;
 
-  // This adjusts the size given to be greater than the minimum allocation size in
-  // words for data in metaspace.  Esentially the minimum size is currently 3 words.
-  size_t get_allocation_word_size(size_t word_size) {
-    size_t byte_size = word_size * BytesPerWord;
-
-    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
-    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
-
-    size_t raw_word_size = raw_bytes_size / BytesPerWord;
-    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
-
-    return raw_word_size;
-  }
-
-  // Adds to the given statistic object.
-  void add_to_statistics(SpaceManagerStatistics* out) const;
-
-  // Verify internal counters against the current state.
-  DEBUG_ONLY(void verify_metrics() const;)
+  DEBUG_ONLY(void verify() const;)
+  DEBUG_ONLY(void verify_locked() const;)
 
 };
 
-
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_SPACEMANAGER_HPP
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,423 +26,248 @@
 
 #include "precompiled.hpp"
 #include "logging/log.hpp"
-#include "logging/logStream.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspace/chunkManager.hpp"
-#include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace/counter.hpp"
 #include "memory/metaspace/virtualSpaceList.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
+
 
 namespace metaspace {
 
-
-VirtualSpaceList::~VirtualSpaceList() {
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    VirtualSpaceNode* vsl = iter.get_next();
-    delete vsl;
-  }
-}
-
-void VirtualSpaceList::inc_reserved_words(size_t v) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _reserved_words = _reserved_words + v;
-}
-void VirtualSpaceList::dec_reserved_words(size_t v) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _reserved_words = _reserved_words - v;
-}
+static int next_node_id = 0;
 
-#define assert_committed_below_limit()                        \
-  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
-         "Too much committed memory. Committed: " SIZE_FORMAT \
-         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
-          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
-
-void VirtualSpaceList::inc_committed_words(size_t v) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _committed_words = _committed_words + v;
-
-  assert_committed_below_limit();
-}
-void VirtualSpaceList::dec_committed_words(size_t v) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _committed_words = _committed_words - v;
-
-  assert_committed_below_limit();
-}
-
-void VirtualSpaceList::inc_virtual_space_count() {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _virtual_space_count++;
-}
-
-void VirtualSpaceList::dec_virtual_space_count() {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _virtual_space_count--;
+// Create a new, empty, expandable list.
+VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limiter)
+  : _name(name),
+    _first_node(NULL),
+    _can_expand(true),
+    _can_purge(true),
+    _commit_limiter(commit_limiter),
+    _reserved_words_counter(),
+    _committed_words_counter()
+{
 }
 
-// Walk the list of VirtualSpaceNodes and delete
-// nodes with a 0 container_count.  Remove Metachunks in
-// the node from their respective freelists.
-void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  // Don't use a VirtualSpaceListIterator because this
-  // list is being changed and a straightforward use of an iterator is not safe.
-  VirtualSpaceNode* prev_vsl = virtual_space_list();
-  VirtualSpaceNode* next_vsl = prev_vsl;
-  int num_purged_nodes = 0;
-  while (next_vsl != NULL) {
-    VirtualSpaceNode* vsl = next_vsl;
-    DEBUG_ONLY(vsl->verify(false);)
-    next_vsl = vsl->next();
-    // Don't free the current virtual space since it will likely
-    // be needed soon.
-    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
-      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
-                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
-      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
-      // Unlink it from the list
-      if (prev_vsl == vsl) {
-        // This is the case of the current node being the first node.
-        assert(vsl == virtual_space_list(), "Expected to be the first node");
-        set_virtual_space_list(vsl->next());
-      } else {
-        prev_vsl->set_next(vsl->next());
-      }
-
-      vsl->purge(chunk_manager);
-      dec_reserved_words(vsl->reserved_words());
-      dec_committed_words(vsl->committed_words());
-      dec_virtual_space_count();
-      delete vsl;
-      num_purged_nodes ++;
-    } else {
-      prev_vsl = vsl;
-    }
-  }
-
-  // Verify list
-#ifdef ASSERT
-  if (num_purged_nodes > 0) {
-    verify(false);
-  }
-#endif
+// Create a new list. The list will contain one node only, which uses the given ReservedSpace.
+// It will not be expandable beyond that first node.
+VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter)
+: _name(name),
+  _first_node(NULL),
+  _can_expand(false),
+  _can_purge(false),
+  _commit_limiter(commit_limiter),
+  _reserved_words_counter(),
+  _committed_words_counter()
+{
+  // Create the first node spanning the existing ReservedSpace. This will be the only node created
+  // for this list since we cannot expand.
+  VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(next_node_id++,
+                                                        rs, _commit_limiter,
+                                                        &_reserved_words_counter, &_committed_words_counter);
+  assert(vsn != NULL, "node creation failed");
+  _first_node = vsn;
+  _first_node->set_next(NULL);
+  _nodes_counter.increment();
 }
 
-
-// This function looks at the mmap regions in the metaspace without locking.
-// The chunks are added with store ordering and not deleted except for at
-// unloading time during a safepoint.
-VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
-  // List should be stable enough to use an iterator here because removing virtual
-  // space nodes is only allowed at a safepoint.
-  if (is_within_envelope((address)ptr)) {
-    VirtualSpaceListIterator iter(virtual_space_list());
-    while (iter.repeat()) {
-      VirtualSpaceNode* vsn = iter.get_next();
-      if (vsn->contains(ptr)) {
-        return vsn;
-      }
-    }
-  }
-  return NULL;
-}
-
-void VirtualSpaceList::retire_current_virtual_space() {
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  VirtualSpaceNode* vsn = current_virtual_space();
-
-  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
-                                  Metaspace::chunk_manager_metadata();
-
-  vsn->retire(cm);
-}
-
-VirtualSpaceList::VirtualSpaceList(size_t word_size) :
-                                   _virtual_space_list(NULL),
-                                   _current_virtual_space(NULL),
-                                   _is_class(false),
-                                   _reserved_words(0),
-                                   _committed_words(0),
-                                   _virtual_space_count(0),
-                                   _envelope_lo((address)max_uintx),
-                                   _envelope_hi(NULL) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  create_new_virtual_space(word_size);
-}
-
-VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
-                                   _virtual_space_list(NULL),
-                                   _current_virtual_space(NULL),
-                                   _is_class(true),
-                                   _reserved_words(0),
-                                   _committed_words(0),
-                                   _virtual_space_count(0),
-                                   _envelope_lo((address)max_uintx),
-                                   _envelope_hi(NULL) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
-  bool succeeded = class_entry->initialize();
-  if (succeeded) {
-    expand_envelope_to_include_node(class_entry);
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
-    link_vs(class_entry);
+VirtualSpaceList::~VirtualSpaceList() {
+  // Note: normally, there is no reason ever to delete a vslist since they are
+  // global objects, but for gtests it makes sense to allow this.
+  VirtualSpaceNode* vsn = _first_node;
+  VirtualSpaceNode* vsn2 = vsn;
+  while (vsn != NULL) {
+    vsn2 = vsn->next();
+    delete vsn;
+    vsn = vsn2;
   }
 }
 
-size_t VirtualSpaceList::free_bytes() {
-  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
-}
-
-// Allocate another meta virtual space and add it to the list.
-bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
+// Create a new node and prepend it to the list. After
+// this function, _first_node points to the new, empty node.
+// List must be expandable for this to work.
+void VirtualSpaceList::create_new_node() {
+  assert(_can_expand, "List is not expandable");
   assert_lock_strong(MetaspaceExpand_lock);
 
-  if (is_class()) {
-    assert(false, "We currently don't support more than one VirtualSpace for"
-                  " the compressed class space. The initialization of the"
-                  " CCS uses another code path and should not hit this path.");
-    return false;
-  }
+  VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(next_node_id ++,
+                                                        Settings::virtual_space_node_default_word_size(),
+                                                        _commit_limiter,
+                                                        &_reserved_words_counter, &_committed_words_counter);
+  assert(vsn != NULL, "node creation failed");
+  vsn->set_next(_first_node);
+  _first_node = vsn;
+  _nodes_counter.increment();
+}
+
+// Allocate a root chunk from this list.
+// Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
+// Hence, before using this chunk, it must be committed.
+// Also, no limits are checked, since no committing takes place.
+Metachunk*  VirtualSpaceList::allocate_root_chunk() {
+  assert_lock_strong(MetaspaceExpand_lock);
 
-  if (vs_word_size == 0) {
-    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
-    return false;
+  log_debug(metaspace)("VirtualSpaceList %s: allocate root chunk.", _name);
+
+  if (_first_node == NULL ||
+      _first_node->free_words() == 0) {
+
+    // There is no node yet, or the current node is fully used up.
+    log_debug(metaspace)("VirtualSpaceList %s: need new node.", _name);
+
+    // Since all allocations from a VirtualSpaceNode happen in
+    // root-chunk-size units, and the node size must be root-chunk-size aligned,
+    // we should never have left-over space.
+    assert(_first_node == NULL ||
+           _first_node->free_words() == 0, "Sanity");
+
+    if (_can_expand) {
+      create_new_node();
+    } else {
+      return NULL; // We cannot expand this list.
+    }
   }
 
-  // Reserve the space
-  size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
+  Metachunk* c = _first_node->allocate_root_chunk();
 
-  // Allocate the meta virtual space and initialize it.
-  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
-  if (!new_entry->initialize()) {
-    delete new_entry;
-    return false;
-  } else {
-    assert(new_entry->reserved_words() == vs_word_size,
-        "Reserved memory size differs from requested memory size");
-    expand_envelope_to_include_node(new_entry);
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
-    link_vs(new_entry);
-    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
-    return true;
-  }
+  assert(c != NULL, "This should have worked");
 
-  DEBUG_ONLY(verify(false);)
+  return c;
 
 }
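
A toy model contrasting the two list flavors at this point: the expandable flavor grows a new node when the current one is exhausted, while the fixed, ReservedSpace-based flavor (presumably the one wrapping the pre-reserved compressed class space, see the _vslist_class pointer below) simply returns NULL. All numbers are placeholders:

#include <cassert>
#include <cstddef>

// Toy stand-in for a VirtualSpaceList handing out root chunks.
struct ToyList {
  bool can_expand;
  int nodes;
  int free_chunks_in_first_node;

  // Models allocate_root_chunk(): true on success, false if we cannot expand.
  bool allocate_root_chunk() {
    if (free_chunks_in_first_node == 0) {
      if (!can_expand) {
        return false;                  // mirrors the 'return NULL' above
      }
      nodes++;                         // mirrors create_new_node()
      free_chunks_in_first_node = 4;   // assumed root chunks per node
    }
    free_chunks_in_first_node--;
    return true;
  }
};

int main() {
  ToyList expandable = { true, 1, 1 };
  assert(expandable.allocate_root_chunk());   // uses the last chunk
  assert(expandable.allocate_root_chunk());   // triggers a new node
  assert(expandable.nodes == 2);

  ToyList fixed = { false, 1, 1 };
  assert(fixed.allocate_root_chunk());        // uses the last chunk
  assert(!fixed.allocate_root_chunk());       // full and not expandable
  return 0;
}
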
 
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
-  if (virtual_space_list() == NULL) {
-      set_virtual_space_list(new_entry);
-  } else {
-    current_virtual_space()->set_next(new_entry);
+// Attempts to purge nodes. This will remove and delete nodes which only contain free chunks.
+// The free chunks are removed from the freelists before the nodes are deleted.
+// Return number of purged nodes.
+int VirtualSpaceList::purge(MetachunkListCluster* freelists) {
+
+  // Note: I am not sure all that purging business is even necessary anymore
+  // since we have a good reclaim mechanism in place. Need to measure.
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  if (_can_purge == false) {
+    log_debug(metaspace)("VirtualSpaceList %s: cannot purge this list.", _name);
+    return 0;
+  }
+
+  log_debug(metaspace)("VirtualSpaceList %s: purging...", _name);
+
+  VirtualSpaceNode* vsn = _first_node;
+  VirtualSpaceNode* prev_vsn = NULL;
+  int num = 0, num_purged = 0;
+  while (vsn != NULL) {
+    VirtualSpaceNode* next_vsn = vsn->next();
+    bool purged = vsn->attempt_purge(freelists);
+    if (purged) {
+      // Note: from now on do not dereference vsn!
+      log_debug(metaspace)("VirtualSpaceList %s: purged node @" PTR_FORMAT, _name, p2i(vsn));
+      if (_first_node == vsn) {
+        _first_node = next_vsn;
+      }
+      DEBUG_ONLY(vsn = (VirtualSpaceNode*)((uintptr_t)(0xdeadbeef));)
+      if (prev_vsn != NULL) {
+        prev_vsn->set_next(next_vsn);
+      }
+      num_purged ++;
+      _nodes_counter.decrement();
+    } else {
+      prev_vsn = vsn;
+    }
+    vsn = next_vsn;
+    num ++;
   }
-  set_current_virtual_space(new_entry);
-  inc_reserved_words(new_entry->reserved_words());
-  inc_committed_words(new_entry->committed_words());
-  inc_virtual_space_count();
+
+  log_debug(metaspace)("VirtualSpaceList %s: purged %d/%d nodes.", _name, num_purged, num);
+
+  return num_purged;
+
+}
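
The unlink-while-iterating pattern used in purge(), in isolation: a minimal sketch with a plain singly-linked list, where the node payload and the purge predicate are placeholders for VirtualSpaceNode and attempt_purge():

#include <cassert>
#include <cstddef>

struct Node {
  int id;
  bool purgeable;   // placeholder for "contains only free chunks"
  Node* next;
};

// Removes purgeable nodes; 'first' is passed by reference so the head can change.
// Mirrors the prev_vsn/next_vsn bookkeeping above. Returns the number removed.
static int purge_list(Node*& first) {
  Node* n = first;
  Node* prev = NULL;
  int purged = 0;
  while (n != NULL) {
    Node* next = n->next;          // read before n may be deleted
    if (n->purgeable) {
      if (first == n) {
        first = next;
      }
      if (prev != NULL) {
        prev->next = next;
      }
      delete n;                    // from now on, do not dereference n
      purged++;
    } else {
      prev = n;
    }
    n = next;
  }
  return purged;
}

int main() {
  // Build 1 -> 2 -> 3 with node 2 purgeable.
  Node* n3 = new Node{3, false, NULL};
  Node* n2 = new Node{2, true, n3};
  Node* n1 = new Node{1, false, n2};
  Node* head = n1;
  assert(purge_list(head) == 1);
  assert(head == n1 && n1->next == n3 && n3->next == NULL);
  delete n1; delete n3;
  return 0;
}
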
+
+// Print all nodes in this space list.
+void VirtualSpaceList::print_on(outputStream* st) const {
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+
+  st->print_cr("vsl %s:", _name);
+  const VirtualSpaceNode* vsn = _first_node;
+  int n = 0;
+  while (vsn != NULL) {
+    st->print("- node #%d: ", n);
+    vsn->print_on(st);
+    vsn = vsn->next();
+    n ++;
+  }
+  st->print_cr("- total %d nodes, " SIZE_FORMAT " reserved words, " SIZE_FORMAT " committed words.",
+               n, reserved_words(), committed_words());
+}
+
 #ifdef ASSERT
-  new_entry->mangle();
-#endif
-  LogTarget(Trace, gc, metaspace) lt;
-  if (lt.is_enabled()) {
-    LogStream ls(lt);
-    VirtualSpaceNode* vsl = current_virtual_space();
-    ResourceMark rm;
-    vsl->print_on(&ls);
+void VirtualSpaceList::verify_locked(bool slow) const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  assert(_name != NULL, "Sanity");
+
+  int n = 0;
+
+  if (_first_node != NULL) {
+
+    size_t total_reserved_words = 0;
+    size_t total_committed_words = 0;
+    const VirtualSpaceNode* vsn = _first_node;
+    while (vsn != NULL) {
+      n ++;
+      vsn->verify(slow);
+      total_reserved_words += vsn->word_size();
+      total_committed_words += vsn->committed_words();
+      vsn = vsn->next();
+    }
+
+    _nodes_counter.check(n);
+    _reserved_words_counter.check(total_reserved_words);
+    _committed_words_counter.check(total_committed_words);
+
+  } else {
+
+    _reserved_words_counter.check(0);
+    _committed_words_counter.check(0);
+
   }
 }
 
-bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
-                                      size_t min_words,
-                                      size_t preferred_words) {
-  size_t before = node->committed_words();
-
-  bool result = node->expand_by(min_words, preferred_words);
-
-  size_t after = node->committed_words();
-
-  // after and before can be the same if the memory was pre-committed.
-  assert(after >= before, "Inconsistency");
-  inc_committed_words(after - before);
-
-  return result;
+void VirtualSpaceList::verify(bool slow) const {
+  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  verify_locked(slow);
 }
-
-bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
-  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
-  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
-  assert(min_words <= preferred_words, "Invalid arguments");
-
-  const char* const class_or_not = (is_class() ? "class" : "non-class");
-
-  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
-    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
-              class_or_not);
-    return  false;
-  }
+#endif
 
-  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
-  if (allowed_expansion_words < min_words) {
-    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
-              class_or_not);
-    return false;
-  }
-
-  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
-
-  // Commit more memory from the the current virtual space.
-  bool vs_expanded = expand_node_by(current_virtual_space(),
-                                    min_words,
-                                    max_expansion_words);
-  if (vs_expanded) {
-     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
-               class_or_not);
-     return true;
-  }
-  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
-            class_or_not);
-  retire_current_virtual_space();
-
-  // Get another virtual space.
-  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
-  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
-
-  if (create_new_virtual_space(grow_vs_words)) {
-    if (current_virtual_space()->is_pre_committed()) {
-      // The memory was pre-committed, so we are done here.
-      assert(min_words <= current_virtual_space()->committed_words(),
-          "The new VirtualSpace was pre-committed, so it"
-          "should be large enough to fit the alloc request.");
+// Returns true if this pointer is contained in one of our nodes.
+bool VirtualSpaceList::contains(const MetaWord* p) const {
+  const VirtualSpaceNode* vsn = _first_node;
+  while (vsn != NULL) {
+    if (vsn->contains(p)) {
       return true;
     }
-
-    return expand_node_by(current_virtual_space(),
-                          min_words,
-                          max_expansion_words);
+    vsn = vsn->next();
   }
-
   return false;
 }
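
// Editor's note: a hypothetical usage sketch (not part of this patch) showing
// how a caller could combine both global lists for a metaspace pointer check;
// is_metaspace_pointer is an invented name:
//
//   static bool is_metaspace_pointer(const MetaWord* p) {
//     return VirtualSpaceList::vslist_nonclass()->contains(p) ||
//            (VirtualSpaceList::vslist_class() != NULL &&
//             VirtualSpaceList::vslist_class()->contains(p));
//   }
//
// The walk is linear in the number of nodes, which is fine since these lists
// stay very short in practice.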
 
-// Given a chunk, calculate the largest possible padding space which
-// could be required when allocating it.
-static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
-  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
-  if (chunk_type != HumongousIndex) {
-    // Normal, non-humongous chunks are allocated at chunk size
-    // boundaries, so the largest padding space required would be that
-    // minus the smallest chunk size.
-    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
-    return chunk_word_size - smallest_chunk_size;
-  } else {
-    // Humongous chunks are allocated at smallest-chunksize
-    // boundaries, so there is no padding required.
-    return 0;
-  }
-}
-
-
-Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
-
-  // Allocate a chunk out of the current virtual space.
-  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
+VirtualSpaceList* VirtualSpaceList::_vslist_class = NULL;
+VirtualSpaceList* VirtualSpaceList::_vslist_nonclass = NULL;
 
-  if (next != NULL) {
-    return next;
-  }
-
-  // The expand amount is currently only determined by the requested sizes
-  // and not how much committed memory is left in the current virtual space.
-
-  // We must have enough space for the requested size and any
-  // additional reqired padding chunks.
-  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
-
-  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
-  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
-  if (min_word_size >= preferred_word_size) {
-    // Can happen when humongous chunks are allocated.
-    preferred_word_size = min_word_size;
-  }
-
-  bool expanded = expand_by(min_word_size, preferred_word_size);
-  if (expanded) {
-    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
-    assert(next != NULL, "The allocation was expected to succeed after the expansion");
-  }
-
-   return next;
+void VirtualSpaceList::set_vslist_class(VirtualSpaceList* vsl) {
+  assert(_vslist_class == NULL, "Sanity");
+  _vslist_class = vsl;
 }
 
-void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
-  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
-      _virtual_space_count, p2i(_current_virtual_space));
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    st->cr();
-    VirtualSpaceNode* node = iter.get_next();
-    node->print_on(st, scale);
-  }
-}
-
-void VirtualSpaceList::print_map(outputStream* st) const {
-  VirtualSpaceNode* list = virtual_space_list();
-  VirtualSpaceListIterator iter(list);
-  unsigned i = 0;
-  while (iter.repeat()) {
-    st->print_cr("Node %u:", i);
-    VirtualSpaceNode* node = iter.get_next();
-    node->print_map(st, this->is_class());
-    i ++;
-  }
-}
-
-// Given a node, expand range such that it includes the node.
-void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
-  _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
-  _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
+void VirtualSpaceList::set_vslist_nonclass(VirtualSpaceList* vsl) {
+  assert(_vslist_nonclass == NULL, "Sanity");
+  _vslist_nonclass = vsl;
 }
 
-
-#ifdef ASSERT
-void VirtualSpaceList::verify(bool slow) {
-  VirtualSpaceNode* list = virtual_space_list();
-  VirtualSpaceListIterator iter(list);
-  size_t reserved = 0;
-  size_t committed = 0;
-  size_t node_count = 0;
-  while (iter.repeat()) {
-    VirtualSpaceNode* node = iter.get_next();
-    if (slow) {
-      node->verify(true);
-    }
-    // Check that the node resides fully within our envelope.
-    assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
-           "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
-           node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
-    reserved += node->reserved_words();
-    committed += node->committed_words();
-    node_count ++;
-  }
-  assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
-      "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
-      ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
-      ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
-      reserved, reserved_words(), committed, committed_words(),
-      node_count, _virtual_space_count);
-}
-#endif // ASSERT
-
 } // namespace metaspace
 
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,143 +27,113 @@
 #define SHARE_MEMORY_METASPACE_VIRTUALSPACELIST_HPP
 
 #include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
+#include "memory/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+class outputStream;
 
 namespace metaspace {
 
 class Metachunk;
-class ChunkManager;
+class MetachunkListCluster;
 
-// List of VirtualSpaces for metadata allocation.
 class VirtualSpaceList : public CHeapObj<mtClass> {
-  friend class VirtualSpaceNode;
 
-  enum VirtualSpaceSizes {
-    VirtualSpaceSize = 256 * K
-  };
+  // Name
+  const char* const _name;
 
-  // Head of the list
-  VirtualSpaceNode* _virtual_space_list;
-  // virtual space currently being used for allocations
-  VirtualSpaceNode* _current_virtual_space;
+  // Head of the list.
+  VirtualSpaceNode* _first_node;
 
-  // Is this VirtualSpaceList used for the compressed class space
-  bool _is_class;
+  // Number of nodes (kept for statistics only).
+  IntCounter _nodes_counter;
 
-  // Sum of reserved and committed memory in the virtual spaces
-  size_t _reserved_words;
-  size_t _committed_words;
+  // Whether this list can expand by allocating new nodes.
+  const bool _can_expand;
 
-  // Number of virtual spaces
-  size_t _virtual_space_count;
+  // Whether this list can be purged.
+  const bool _can_purge;
 
-  // Optimization: we keep an address range to quickly exclude pointers
-  // which are clearly not pointing into metaspace. This is an optimization for
-  // VirtualSpaceList::contains().
-  address _envelope_lo;
-  address _envelope_hi;
+  // Used to check limits before committing memory.
+  CommitLimiter* const _commit_limiter;
 
-  bool is_within_envelope(address p) const {
-    return p >= _envelope_lo && p < _envelope_hi;
-  }
+  // Statistics
 
-  // Given a node, expand range such that it includes the node.
-  void expand_envelope_to_include_node(const VirtualSpaceNode* node);
+  // Holds sum of reserved space, in words, over all list nodes.
+  SizeCounter _reserved_words_counter;
 
-  ~VirtualSpaceList();
-
-  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
+  // Holds sum of committed space, in words, over all list nodes.
+  SizeCounter _committed_words_counter;
 
-  void set_virtual_space_list(VirtualSpaceNode* v) {
-    _virtual_space_list = v;
-  }
-  void set_current_virtual_space(VirtualSpaceNode* v) {
-    _current_virtual_space = v;
-  }
+  // Create a new node and add it to the list. After this
+  // function returns, _first_node points to the new, empty node.
+  // The list must be expandable for this to work.
+  void create_new_node();
 
-  void link_vs(VirtualSpaceNode* new_entry);
+public:
 
-  // Get another virtual space and add it to the list.  This
-  // is typically prompted by a failed attempt to allocate a chunk
-  // and is typically followed by the allocation of a chunk.
-  bool create_new_virtual_space(size_t vs_word_size);
+  // Create a new, empty, expandable list.
+  VirtualSpaceList(const char* name, CommitLimiter* commit_limiter);
 
-  // Chunk up the unused committed space in the current
-  // virtual space and add the chunks to the free list.
-  void retire_current_virtual_space();
+  // Create a new list. The list will contain one node only, which uses the given ReservedSpace.
+  // It will not be expandable beyond that first node.
+  VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter);
 
-  DEBUG_ONLY(bool contains_node(const VirtualSpaceNode* node) const;)
+  virtual ~VirtualSpaceList();
 
- public:
-  VirtualSpaceList(size_t word_size);
-  VirtualSpaceList(ReservedSpace rs);
-
-  size_t free_bytes();
-
-  Metachunk* get_new_chunk(size_t chunk_word_size,
-                           size_t suggested_commit_granularity);
-
-  bool expand_node_by(VirtualSpaceNode* node,
-                      size_t min_words,
-                      size_t preferred_words);
+  // Allocate a root chunk from this list.
+  // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
+  // Hence, before using this chunk, it must be committed.
+  // May return NULL if the vslist would need to be expanded to hold the new root chunk but
+  // the list cannot be expanded (in practice this means we reached CompressedClassSpaceSize).
+  Metachunk* allocate_root_chunk();
 
-  bool expand_by(size_t min_words,
-                 size_t preferred_words);
+  // Attempts to purge nodes. This will remove and delete nodes which only contain free chunks.
+  // The free chunks are removed from the freelists before the nodes are deleted.
+  // Returns the number of purged nodes.
+  int purge(MetachunkListCluster* freelists);
 
-  VirtualSpaceNode* current_virtual_space() {
-    return _current_virtual_space;
-  }
+  //// Statistics ////
 
-  bool is_class() const { return _is_class; }
-
-  bool initialization_succeeded() { return _virtual_space_list != NULL; }
+  // Return sum of reserved words in all nodes.
+  size_t reserved_words() const     { return _reserved_words_counter.get(); }
 
-  size_t reserved_words()  { return _reserved_words; }
-  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
-  size_t committed_words() { return _committed_words; }
-  size_t committed_bytes() { return committed_words() * BytesPerWord; }
+  // Return sum of committed words in all nodes.
+  size_t committed_words() const    { return _committed_words_counter.get(); }
+
+  // Return number of nodes in this list.
+  int num_nodes() const             { return _nodes_counter.get(); }
 
-  void inc_reserved_words(size_t v);
-  void dec_reserved_words(size_t v);
-  void inc_committed_words(size_t v);
-  void dec_committed_words(size_t v);
-  void inc_virtual_space_count();
-  void dec_virtual_space_count();
+  //// Debug stuff ////
+  DEBUG_ONLY(void verify(bool slow) const;)
+  DEBUG_ONLY(void verify_locked(bool slow) const;)
 
-  VirtualSpaceNode* find_enclosing_space(const void* ptr);
-  bool contains(const void* ptr) { return find_enclosing_space(ptr) != NULL; }
-
-  // Unlink empty VirtualSpaceNodes and free it.
-  void purge(ChunkManager* chunk_manager);
+  // Print all nodes in this space list.
+  void print_on(outputStream* st) const;
 
-  void print_on(outputStream* st) const                 { print_on(st, K); }
-  void print_on(outputStream* st, size_t scale) const;
-  void print_map(outputStream* st) const;
+  // Returns true if this pointer is contained in one of our nodes.
+  bool contains(const MetaWord* p) const;
 
-  DEBUG_ONLY(void verify(bool slow);)
+private:
 
-  class VirtualSpaceListIterator : public StackObj {
-    VirtualSpaceNode* _virtual_spaces;
-   public:
-    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
-      _virtual_spaces(virtual_spaces) {}
+  static VirtualSpaceList* _vslist_class;
+  static VirtualSpaceList* _vslist_nonclass;
+
+public:
 
-    bool repeat() {
-      return _virtual_spaces != NULL;
-    }
+  static VirtualSpaceList* vslist_class()       { return _vslist_class; }
+  static VirtualSpaceList* vslist_nonclass()    { return _vslist_nonclass; }
 
-    VirtualSpaceNode* get_next() {
-      VirtualSpaceNode* result = _virtual_spaces;
-      if (_virtual_spaces != NULL) {
-        _virtual_spaces = _virtual_spaces->next();
-      }
-      return result;
-    }
-  };
+  static void set_vslist_class(VirtualSpaceList* vslist_class);
+  static void set_vslist_nonclass(VirtualSpaceList* vslist_nonclass);
+
 };
 
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_VIRTUALSPACELIST_HPP
+
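
// Editor's note: a hypothetical setup sketch (not part of this patch) showing
// how the two global lists declared above are expected to be wired up: an
// expandable list for non-class metaspace, and a single-node, non-expandable
// list on top of the compressed class space reservation (rs).
//
//   VirtualSpaceList* nc =
//       new VirtualSpaceList("non-class vslist", CommitLimiter::globalLimiter());
//   VirtualSpaceList::set_vslist_nonclass(nc);
//
//   ReservedSpace rs = ...; // the CompressedClassSpaceSize reservation
//   VirtualSpaceList* cl =
//       new VirtualSpaceList("class vslist", rs, CommitLimiter::globalLimiter());
//   VirtualSpaceList::set_vslist_class(cl);
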
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,567 +23,503 @@
  *
  */
 
 #include "precompiled.hpp"
+
+#include "memory/metaspace/settings.hpp"
 
 #include "logging/log.hpp"
-#include "logging/logStream.hpp"
+
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/chunkHeaderPool.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/internStat.hpp"
 #include "memory/metaspace/metachunk.hpp"
-#include "memory/metaspace.hpp"
-#include "memory/metaspace/chunkManager.hpp"
-#include "memory/metaspace/metaDebug.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
-#include "memory/metaspace/occupancyMap.hpp"
+#include "memory/metaspace/rootChunkArea.hpp"
+#include "memory/metaspace/runningCounters.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
-#include "memory/virtualspace.hpp"
+
+#include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
-#include "services/memTracker.hpp"
-#include "utilities/copy.hpp"
+
+#include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
 
 namespace metaspace {
 
-// Decide if large pages should be committed when the memory is reserved.
-static bool should_commit_large_pages_when_reserving(size_t bytes) {
-  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
-    size_t words = bytes / BytesPerWord;
-    bool is_class = false; // We never reserve large pages for the class space.
-    if (MetaspaceGC::can_expand(words, is_class) &&
-        MetaspaceGC::allowed_expansion() >= words) {
-      return true;
-    }
+#ifdef ASSERT
+void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
+  assert(is_aligned(p, Settings::commit_granule_bytes()),
+         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
+         p2i(p));
+}
+void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
+  assert(is_aligned(word_size, Settings::commit_granule_words()),
+         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
+}
+#endif
+
+// Given an address range, ensure it is committed.
+//
+// The range has to be aligned to granule size.
+//
+// The function will:
+// - check how many granules in that region are uncommitted; if all are committed, it
+//    returns true immediately.
+// - check if committing those uncommitted granules would put us over the commit limit
+//    (GC threshold, MaxMetaspaceSize); if so, it returns false.
+// - commit the memory.
+// - mark the range as committed in the commit mask.
+//
+// Returns true on success, false if it hit a commit limit.
+bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
+
+  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
+  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  // First calculate how large the committed regions in this range are
+  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
+  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)
+
+  // By how much words we would increase commit charge
+  //  were we to commit the given address range completely.
+  const size_t commit_increase_words = word_size - committed_words_in_range;
+
+  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": committing range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
+                       _node_id, p2i(_base), p2i(p), p2i(p + word_size), word_size);
+
+  if (commit_increase_words == 0) {
+    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": ... already fully committed.",
+                         _node_id, p2i(_base));
+    return true; // Already fully committed, nothing to do.
   }
 
-  return false;
-}
-
-// byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
-    _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
-  assert_is_aligned(bytes, Metaspace::reserve_alignment());
-  bool large_pages = should_commit_large_pages_when_reserving(bytes);
-  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
-
-  if (_rs.is_reserved()) {
-    assert(_rs.base() != NULL, "Catch if we get a NULL address");
-    assert(_rs.size() != 0, "Catch if we get a 0 size");
-    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
-    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
-
-    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  // Before committing any more memory, check limits.
+  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
+    return false;
   }
-}
 
-void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
-  // When a node is purged, lets give it a thorough examination.
-  DEBUG_ONLY(verify(true);)
-  Metachunk* chunk = first_chunk();
-  Metachunk* invalid_chunk = (Metachunk*) top();
-  while (chunk < invalid_chunk ) {
-    assert(chunk->is_tagged_free(), "Should be tagged free");
-    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
-    chunk_manager->remove_chunk(chunk);
-    chunk->remove_sentinel();
-    assert(chunk->next() == NULL &&
-        chunk->prev() == NULL,
-        "Was not removed from its list");
-    chunk = (Metachunk*) next;
-  }
-}
-
-void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
-
-  if (bottom() == top()) {
-    return;
+  // Commit...
+  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
+    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
   }
 
-  const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
-  const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
-  const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
-
-  int line_len = 100;
-  const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
-  line_len = (int)(section_len / spec_chunk_size);
-
-  static const int NUM_LINES = 4;
+  log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
+                           commit_increase_words * BytesPerWord);
 
-  char* lines[NUM_LINES];
-  for (int i = 0; i < NUM_LINES; i ++) {
-    lines[i] = (char*)os::malloc(line_len, mtInternal);
-  }
-  int pos = 0;
-  const MetaWord* p = bottom();
-  const Metachunk* chunk = (const Metachunk*)p;
-  const MetaWord* chunk_end = p + chunk->word_size();
-  while (p < top()) {
-    if (pos == line_len) {
-      pos = 0;
-      for (int i = 0; i < NUM_LINES; i ++) {
-        st->fill_to(22);
-        st->print_raw(lines[i], line_len);
-        st->cr();
-      }
-    }
-    if (pos == 0) {
-      st->print(PTR_FORMAT ":", p2i(p));
-    }
-    if (p == chunk_end) {
-      chunk = (Metachunk*)p;
-      chunk_end = p + chunk->word_size();
-    }
-    // line 1: chunk starting points (a dot if that area is a chunk start).
-    lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
+  // ... tell commit limiter...
+  _commit_limiter->increase_committed(commit_increase_words);
 
-    // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
-    // chunk is in use.
-    const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
-    if (chunk->word_size() == spec_chunk_size) {
-      lines[1][pos] = chunk_is_free ? 'x' : 'X';
-    } else if (chunk->word_size() == small_chunk_size) {
-      lines[1][pos] = chunk_is_free ? 's' : 'S';
-    } else if (chunk->word_size() == med_chunk_size) {
-      lines[1][pos] = chunk_is_free ? 'm' : 'M';
-    } else if (chunk->word_size() > med_chunk_size) {
-      lines[1][pos] = chunk_is_free ? 'h' : 'H';
-    } else {
-      ShouldNotReachHere();
-    }
-
-    // Line 3: chunk origin
-    const ChunkOrigin origin = chunk->get_origin();
-    lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
+  // ... update counters in containing vslist ...
+  _total_committed_words_counter->increment_by(commit_increase_words);
 
-    // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
-    //         but were never used.
-    lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
-
-    p += spec_chunk_size;
-    pos ++;
-  }
-  if (pos > 0) {
-    for (int i = 0; i < NUM_LINES; i ++) {
-      st->fill_to(22);
-      st->print_raw(lines[i], line_len);
-      st->cr();
-    }
-  }
-  for (int i = 0; i < NUM_LINES; i ++) {
-    os::free(lines[i]);
-  }
-}
-
+  // ... and update the commit mask.
+  _commit_mask.mark_range_as_committed(p, word_size);
 
 #ifdef ASSERT
+  // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
+  // in both class and non-class vslist (outside gtests).
+  if (_commit_limiter == CommitLimiter::globalLimiter()) {
+    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
+  }
+#endif
 
-// Verify counters, all chunks in this list node and the occupancy map.
-void VirtualSpaceNode::verify(bool slow) {
-  log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
-    (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
-  // Fast mode: just verify chunk counters and basic geometry
-  // Slow mode: verify chunks and occupancy map
-  uintx num_in_use_chunks = 0;
-  Metachunk* chunk = first_chunk();
-  Metachunk* invalid_chunk = (Metachunk*) top();
+  DEBUG_ONLY(InternalStats::inc_num_space_committed();)
+
+  return true;
+
+}
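
// Editor's note: a self-contained sketch (not part of this patch) of the
// commit-charge arithmetic used by commit_range() above, with hypothetical
// numbers; assumes a commit granule of 8192 words (64K on 64-bit).
#include <cassert>
#include <cstddef>
void commit_charge_example() {
  const size_t granule = 8192;
  const size_t word_size = 4 * granule;              // range spans 4 granules
  const size_t committed_words_in_range = granule;   // 1 is already committed
  const size_t commit_increase_words = word_size - committed_words_in_range;
  assert(commit_increase_words == 3 * granule);      // only 3 granules are charged
  // Only commit_increase_words is checked against the limiter and added to the
  // committed-words counters; already-committed granules are not double-charged.
}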
 
-  // Iterate the chunks in this node and verify each chunk.
-  while (chunk < invalid_chunk ) {
-    if (slow) {
-      do_verify_chunk(chunk);
-    }
-    if (!chunk->is_tagged_free()) {
-      num_in_use_chunks ++;
-    }
-    const size_t s = chunk->word_size();
-    // Prevent endless loop on invalid chunk size.
-    assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
-    MetaWord* next = ((MetaWord*)chunk) + s;
-    chunk = (Metachunk*) next;
-  }
-  assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
-      ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
-  // Also verify the occupancy map.
-  if (slow) {
-    occupancy_map()->verify(bottom(), top());
-  }
+// Given an address range, ensure it is committed.
+//
+// The range does not have to be aligned to granule size. However, the function will always commit
+// whole granules.
+//
+// The function will:
+// - check how many granules in that region are uncommitted; if all are committed, it
+//    returns true immediately.
+// - check if committing those uncommitted granules would put us over the commit limit
+//    (GC threshold, MaxMetaspaceSize); if so, it returns false.
+// - commit the memory.
+// - mark the range as committed in the commit mask.
+//
+// !! Careful:
+//    calling ensure_range_is_committed on a range which contains both committed and uncommitted
+//    areas will commit the whole area, thus erasing the content of the already-committed parts.
+//    Make sure you never call this on an address range containing live data. !!
+//
+// Returns true on success, false if it hit a commit limit.
+bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+  assert(p != NULL && word_size > 0, "Sanity");
+
+  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
+  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
+
+  // Todo: simple for now. Make it more intelligent later.
+  return commit_range(p_start, p_end - p_start);
+
 }
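
// Editor's note: a self-contained sketch (not part of this patch) of the
// granule-widening math above: align_down/align_up reimplemented for plain
// word offsets, with a hypothetical 8192-word granule.
#include <cassert>
#include <cstddef>
static size_t align_down_w(size_t v, size_t a) { return v - (v % a); }
static size_t align_up_w(size_t v, size_t a)   { return align_down_w(v + a - 1, a); }
void granule_widening_example() {
  const size_t granule = 8192;
  const size_t p = 100, n = 20;                    // a tiny 20-word range at offset 100
  const size_t start = align_down_w(p, granule);   // 0
  const size_t end   = align_up_w(p + n, granule); // 8192
  assert(end - start == granule);  // even a tiny range commits one whole granule
}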
 
-// Verify that all free chunks in this node are ideally merged
-// (there not should be multiple small chunks where a large chunk could exist.)
-void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
-  Metachunk* chunk = first_chunk();
-  Metachunk* invalid_chunk = (Metachunk*) top();
-  // Shorthands.
-  const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
-  const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
-  int num_free_chunks_since_last_med_boundary = -1;
-  int num_free_chunks_since_last_small_boundary = -1;
-  bool error = false;
-  char err[256];
-  while (!error && chunk < invalid_chunk ) {
-    // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
-    // Reset the counter when encountering a non-free chunk.
-    if (chunk->get_chunk_type() != HumongousIndex) {
-      if (chunk->is_tagged_free()) {
-        // Count successive free, non-humongous chunks.
-        if (is_aligned(chunk, size_small)) {
-          if (num_free_chunks_since_last_small_boundary > 0) {
-            error = true;
-            jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk));
-          } else {
-            num_free_chunks_since_last_small_boundary = 0;
-          }
-        } else if (num_free_chunks_since_last_small_boundary != -1) {
-          num_free_chunks_since_last_small_boundary ++;
-        }
-        if (is_aligned(chunk, size_med)) {
-          if (num_free_chunks_since_last_med_boundary > 0) {
-            error = true;
-            jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk));
-          } else {
-            num_free_chunks_since_last_med_boundary = 0;
-          }
-        } else if (num_free_chunks_since_last_med_boundary != -1) {
-          num_free_chunks_since_last_med_boundary ++;
-        }
-      } else {
-        // Encountering a non-free chunk, reset counters.
-        num_free_chunks_since_last_med_boundary = -1;
-        num_free_chunks_since_last_small_boundary = -1;
-      }
-    } else {
-      // One cannot merge areas with a humongous chunk in the middle. Reset counters.
-      num_free_chunks_since_last_med_boundary = -1;
-      num_free_chunks_since_last_small_boundary = -1;
-    }
+// Given an address range (which has to be aligned to commit granule size):
+//  - uncommit it
+//  - mark it as uncommitted in the commit mask
+void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
+
+  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
+  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  // First calculate how large the committed regions in this range are
+  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
+  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)
+
+  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": uncommitting range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
+                       _node_id, p2i(_base), p2i(p), p2i(p + word_size), word_size);
+
+  if (committed_words_in_range == 0) {
+    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": ... already fully uncommitted.",
+                         _node_id, p2i(_base));
+    return; // Already fully uncommitted, nothing to do.
+  }
 
-    if (error) {
-      print_map(tty, is_class());
-      fatal("%s", err);
-    }
+  // Uncommit...
+  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
+    // Note: this can actually happen, since uncommit may increase the number of mappings.
+    fatal("Failed to uncommit metaspace.");
+  }
+
+  log_debug(metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
+                        committed_words_in_range * BytesPerWord);
+
+  // ... tell commit limiter...
+  _commit_limiter->decrease_committed(committed_words_in_range);
 
-    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
-    chunk = (Metachunk*) next;
+  // ... and global counters...
+  _total_committed_words_counter->decrement_by(committed_words_in_range);
+
+   // ... and update the commit mask.
+  _commit_mask.mark_range_as_uncommitted(p, word_size);
+
+#ifdef ASSERT
+  // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
+  // in both class and non-class vslist (outside gtests).
+  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
+    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
   }
-}
-#endif // ASSERT
+#endif
 
-void VirtualSpaceNode::inc_container_count() {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _container_count++;
+  DEBUG_ONLY(InternalStats::inc_num_space_uncommitted();)
+
 }
 
-void VirtualSpaceNode::dec_container_count() {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _container_count--;
+//// creation, destruction ////
+
+VirtualSpaceNode::VirtualSpaceNode(int node_id,
+                                   ReservedSpace rs,
+                                   CommitLimiter* limiter,
+                                   SizeCounter* reserve_words_counter,
+                                   SizeCounter* commit_words_counter)
+  : _next(NULL),
+    _rs(rs),
+    _base((MetaWord*)rs.base()),
+    _word_size(rs.size() / BytesPerWord),
+    _used_words(0),
+    _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
+    _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
+    _commit_limiter(limiter),
+    _total_reserved_words_counter(reserve_words_counter),
+    _total_committed_words_counter(commit_words_counter),
+    _node_id(node_id)
+{
+
+  log_debug(metaspace)("Create new VirtualSpaceNode %d, base " PTR_FORMAT ", word size " SIZE_FORMAT ".",
+                       _node_id, p2i(_base), _word_size);
+
+  // Update reserved counter in vslist
+  _total_reserved_words_counter->increment_by(_word_size);
+
+  assert_is_aligned(_base, chklvl::MAX_CHUNK_BYTE_SIZE);
+  assert_is_aligned(_word_size, chklvl::MAX_CHUNK_WORD_SIZE);
+
+  // For testing, one could explicitly uncommit the whole node here to
+  // guarantee it starts out inaccessible:
+//  os::uncommit_memory((char*)_base, _word_size * BytesPerWord);
+
+}
+
+// Create a node of a given size
+VirtualSpaceNode* VirtualSpaceNode::create_node(int node_id,
+                                                size_t word_size,
+                                                CommitLimiter* limiter,
+                                                SizeCounter* reserve_words_counter,
+                                                SizeCounter* commit_words_counter)
+{
+
+  DEBUG_ONLY(assert_is_aligned(word_size, chklvl::MAX_CHUNK_WORD_SIZE);)
+
+  ReservedSpace rs(word_size * BytesPerWord,
+                   chklvl::MAX_CHUNK_BYTE_SIZE,
+                   false, // TODO deal with large pages
+                   false);
+
+  if (!rs.is_reserved()) {
+    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
+  }
+
+  assert_is_aligned(rs.base(), chklvl::MAX_CHUNK_BYTE_SIZE);
+
+  return create_node(node_id, rs, limiter, reserve_words_counter, commit_words_counter);
+
+}
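
// Editor's note: a hypothetical usage sketch (not part of this patch), e.g. in
// a gtest-like scenario; the CommitLimiter constructor arguments are assumed:
//
//   CommitLimiter limiter(commit_word_limit);  // private, non-global limiter
//   SizeCounter reserved_words, committed_words;
//   VirtualSpaceNode* node =
//       VirtualSpaceNode::create_node(42, chklvl::MAX_CHUNK_WORD_SIZE,
//                                     &limiter, &reserved_words, &committed_words);
//   // ... allocate and commit chunks ...
//   delete node;  // releases the reservation and rolls both counters back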
+
+// Create a node over an existing space
+VirtualSpaceNode* VirtualSpaceNode::create_node(int node_id,
+                                                ReservedSpace rs,
+                                                CommitLimiter* limiter,
+                                                SizeCounter* reserve_words_counter,
+                                                SizeCounter* commit_words_counter)
+{
+  DEBUG_ONLY(InternalStats::inc_num_vsnodes_created();)
+  return new VirtualSpaceNode(node_id, rs, limiter, reserve_words_counter, commit_words_counter);
 }
 
 VirtualSpaceNode::~VirtualSpaceNode() {
   _rs.release();
-  if (_occupancy_map != NULL) {
-    delete _occupancy_map;
-  }
-#ifdef ASSERT
-  size_t word_size = sizeof(*this) / BytesPerWord;
-  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
-#endif
-}
+
+  log_debug(metaspace)("Destroying VirtualSpaceNode %d, base " PTR_FORMAT ", word size " SIZE_FORMAT ".",
+                       _node_id, p2i(_base), _word_size);
 
-size_t VirtualSpaceNode::used_words_in_vs() const {
-  return pointer_delta(top(), bottom(), sizeof(MetaWord));
-}
+  // Update counters in vslist
+  _total_committed_words_counter->decrement_by(committed_words());
+  _total_reserved_words_counter->decrement_by(_word_size);
 
-// Space committed in the VirtualSpace
-size_t VirtualSpaceNode::capacity_words_in_vs() const {
-  return pointer_delta(end(), bottom(), sizeof(MetaWord));
-}
+  DEBUG_ONLY(InternalStats::inc_num_vsnodes_destroyed();)
 
-size_t VirtualSpaceNode::free_words_in_vs() const {
-  return pointer_delta(end(), top(), sizeof(MetaWord));
 }
 
-// Given an address larger than top(), allocate padding chunks until top is at the given address.
-void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
+
 
-  assert(target_top > top(), "Sanity");
+//// Chunk allocation, splitting, merging /////
 
-  // Padding chunks are added to the freelist.
-  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
-
-  // shorthands
-  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
-  const size_t small_word_size = chunk_manager->small_chunk_word_size();
-  const size_t med_word_size = chunk_manager->medium_chunk_word_size();
+// Allocate a root chunk from this node. Will fail and return NULL
+// if the node is full.
+// Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
+// Hence, before using this chunk, it must be committed.
+// Also, no limits are checked, since no committing takes place.
+Metachunk* VirtualSpaceNode::allocate_root_chunk() {
 
-  while (top() < target_top) {
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  assert_is_aligned(free_words(), chklvl::MAX_CHUNK_WORD_SIZE);
 
-    // We could make this coding more generic, but right now we only deal with two possible chunk sizes
-    // for padding chunks, so it is not worth it.
-    size_t padding_chunk_word_size = small_word_size;
-    if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
-      assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
-      padding_chunk_word_size = spec_word_size;
-    }
-    MetaWord* here = top();
-    assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
-    inc_top(padding_chunk_word_size);
+  if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) {
+
+    MetaWord* loc = _base + _used_words;
+    _used_words += chklvl::MAX_CHUNK_WORD_SIZE;
+
+    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);
 
-    // Create new padding chunk.
-    ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
-    assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
+    // Create a root chunk header and initialize it;
+    Metachunk* c = rca->alloc_root_chunk_header(this);
 
-    Metachunk* const padding_chunk =
-        ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
-    assert(padding_chunk == (Metachunk*)here, "Sanity");
-    DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
-    log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
-        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
-        (is_class() ? "class space " : "metaspace"),
-        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
+    assert(c->base() == loc && c->vsnode() == this &&
+           c->is_free(), "Sanity");
+
+    DEBUG_ONLY(c->verify(true);)
+
+    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": newborn root chunk " METACHUNK_FORMAT ".",
+                         _node_id, p2i(_base), METACHUNK_FORMAT_ARGS(c));
 
-    // Mark chunk start in occupancy map.
-    occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
-
-    // Chunks are born as in-use (see MetaChunk ctor). So, before returning
-    // the padding chunk to its chunk manager, mark it as in use (ChunkManager
-    // will assert that).
-    do_update_in_use_info_for_chunk(padding_chunk, true);
+    if (Settings::newborn_root_chunks_are_fully_committed()) {
+      log_trace(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": committing newborn root chunk.",
+                           _node_id, p2i(_base));
+      // Note: commit via the chunk (ensure_fully_committed_locked()), do not commit directly.
+      // This makes sure the chunk knows its commit range and does not ask needlessly.
+      c->ensure_fully_committed_locked();
+    }
 
-    // Return Chunk to freelist.
-    inc_container_count();
-    chunk_manager->return_single_chunk(padding_chunk);
-    // Please note: at this point, ChunkManager::return_single_chunk()
-    // may already have merged the padding chunk with neighboring chunks, so
-    // it may have vanished at this point. Do not reference the padding
-    // chunk beyond this point.
+    return c;
+
   }
 
-  assert(top() == target_top, "Sanity");
-
-} // allocate_padding_chunks_until_top_is_at()
-
-// Allocates the chunk from the virtual space only.
-// This interface is also used internally for debugging.  Not all
-// chunks removed here are necessarily used for allocation.
-Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
-  // Non-humongous chunks are to be allocated aligned to their chunk
-  // size. So, start addresses of medium chunks are aligned to medium
-  // chunk size, those of small chunks to small chunk size and so
-  // forth. This facilitates merging of free chunks and reduces
-  // fragmentation. Chunk sizes are spec < small < medium, with each
-  // larger chunk size being a multiple of the next smaller chunk
-  // size.
-  // Because of this alignment, me may need to create a number of padding
-  // chunks. These chunks are created and added to the freelist.
-
-  // The chunk manager to which we will give our padding chunks.
-  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
+  return NULL; // Node is full.
 
-  // shorthands
-  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
-  const size_t small_word_size = chunk_manager->small_chunk_word_size();
-  const size_t med_word_size = chunk_manager->medium_chunk_word_size();
-
-  assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
-      chunk_word_size >= med_word_size, "Invalid chunk size requested.");
-
-  // Chunk alignment (in bytes) == chunk size unless humongous.
-  // Humongous chunks are aligned to the smallest chunk size (spec).
-  const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
-      spec_word_size : chunk_word_size) * sizeof(MetaWord);
-
-  // Do we have enough space to create the requested chunk plus
-  // any padding chunks needed?
-  MetaWord* const next_aligned =
-      static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
-  if (!is_available((next_aligned - top()) + chunk_word_size)) {
-    return NULL;
-  }
+}
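
// Editor's note: a self-contained sketch (not part of this patch) of the bump
// allocation above: a node hands out root chunks by advancing _used_words in
// whole root-chunk steps. Sizes below are hypothetical.
#include <cassert>
#include <cstddef>
void root_chunk_bump_example() {
  const size_t node_words = 8 * 1024 * 1024;  // reserved node size, in words
  const size_t root_words = 1024 * 1024;      // stand-in for MAX_CHUNK_WORD_SIZE
  size_t used_words = 0;
  int handed_out = 0;
  while (node_words - used_words >= root_words) { // the free_words() check
    // the chunk base would be _base + used_words
    used_words += root_words;
    handed_out++;
  }
  assert(handed_out == 8 && used_words == node_words); // node is now full
}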
 
-  // Before allocating the requested chunk, allocate padding chunks if necessary.
-  // We only need to do this for small or medium chunks: specialized chunks are the
-  // smallest size, hence always aligned. Homungous chunks are allocated unaligned
-  // (implicitly, also aligned to smallest chunk size).
-  if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
-    log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
-        (is_class() ? "class space " : "metaspace"),
-        top(), next_aligned);
-    allocate_padding_chunks_until_top_is_at(next_aligned);
-    // Now, top should be aligned correctly.
-    assert_is_aligned(top(), required_chunk_alignment);
-  }
-
-  // Now, top should be aligned correctly.
-  assert_is_aligned(top(), required_chunk_alignment);
-
-  // Bottom of the new chunk
-  MetaWord* chunk_limit = top();
-  assert(chunk_limit != NULL, "Not safe to call this method");
-
-  // The virtual spaces are always expanded by the
-  // commit granularity to enforce the following condition.
-  // Without this the is_available check will not work correctly.
-  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
-      "The committed memory doesn't match the expanded memory.");
+// Given a chunk c, split it recursively until you get a chunk of the given target_level.
+//
+// The original chunk must not be part of a freelist.
+//
+// Returns a pointer to the result chunk; the split-off chunks are added as
+//  free chunks to the freelists.
+//
+// Returns NULL if chunk cannot be split at least once.
+Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, MetachunkListCluster* freelists) {
 
-  if (!is_available(chunk_word_size)) {
-    LogTarget(Trace, gc, metaspace, freelist) lt;
-    if (lt.is_enabled()) {
-      LogStream ls(lt);
-      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
-      // Dump some information about the virtual space that is nearly full
-      print_on(&ls);
-    }
-    return NULL;
-  }
+  assert_lock_strong(MetaspaceExpand_lock);
 
-  // Take the space  (bump top on the current virtual space).
-  inc_top(chunk_word_size);
+  // Get the area associated with this chunk and let it handle the splitting
+  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
 
-  // Initialize the chunk
-  ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
-  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
-  assert(result == (Metachunk*)chunk_limit, "Sanity");
-  occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
-  do_update_in_use_info_for_chunk(result, true);
-
-  inc_container_count();
+  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
 
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    chunk_manager->locked_verify(true);
-    verify(true);
-  END_EVERY_NTH
-  do_verify_chunk(result);
-#endif
+  return rca->split(target_level, c, freelists);
 
-  result->inc_use_count();
-
-  return result;
 }
 
 
-// Expand the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
-  size_t min_bytes = min_words * BytesPerWord;
-  size_t preferred_bytes = preferred_words * BytesPerWord;
-
-  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
-
-  if (uncommitted < min_bytes) {
-    return false;
-  }
+// Given a chunk, attempt to merge it recursively with its neighboring chunks.
+//
+// If successful (merged at least once), returns address of
+// the merged chunk; NULL otherwise.
+//
+// The merged chunks are removed from the freelists.
+//
+// !!! Please note that if this method returns a non-NULL value, the
+// original chunk will be invalid and should not be accessed anymore! !!!
+Metachunk* VirtualSpaceNode::merge(Metachunk* c, MetachunkListCluster* freelists) {
 
-  size_t commit = MIN2(preferred_bytes, uncommitted);
-  bool result = virtual_space()->expand_by(commit, false);
+  assert(c != NULL && c->is_free(), "Sanity");
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  // Get the area associated with this chunk and let it handle the merging
+  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
 
-  if (result) {
-    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
-        (is_class() ? "class" : "non-class"), commit);
-    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
-  } else {
-    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
-        (is_class() ? "class" : "non-class"), commit);
-  }
+  Metachunk* c2 = rca->merge(c, freelists);
 
-  assert(result, "Failed to commit memory");
+  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
 
-  return result;
+  return c2;
+
 }
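
// Editor's note: a self-contained sketch (not part of this patch) of the buddy
// rule merge() relies on: a chunk of size s can only absorb the chunk directly
// behind it if it is the leader of the pair, i.e. its offset within the root
// chunk is aligned to 2*s.
#include <cassert>
#include <cstddef>
static bool is_buddy_leader(size_t offset_in_root_words, size_t chunk_words) {
  return offset_in_root_words % (2 * chunk_words) == 0;
}
void merge_rule_example() {
  assert(is_buddy_leader(0, 1024));      // leader: may absorb words [1024, 2048)
  assert(!is_buddy_leader(1024, 1024));  // follower: its leader sits at offset 0
  assert(is_buddy_leader(2048, 1024));   // leader of the next pair
}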
 
-Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
+// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+// enlarge it in place by claiming its trailing buddy.
+//
+// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+//
+// If successful, the follower chunk will be removed from the freelists, the leader chunk c will
+// double in size (level decreased by one).
+//
+// On success, true is returned, false otherwise.
+bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, MetachunkListCluster* freelists) {
+
+  assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
   assert_lock_strong(MetaspaceExpand_lock);
-  Metachunk* result = take_from_committed(chunk_word_size);
-  return result;
+
+  // Get the area associated with this chunk and let it handle the enlargement
+  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
+
+  bool rc = rca->attempt_enlarge_chunk(c, freelists);
+
+  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
+
+  return rc;
+
 }
 
-bool VirtualSpaceNode::initialize() {
+// Attempts to purge the node:
+//
+// If all chunks living in this node are free, they will all be removed from their freelists
+//   and the node will be deleted.
+//
+// Returns true if the node has been deleted, false if not.
+// !! If this returns true, do not access the node from this point on. !!
+bool VirtualSpaceNode::attempt_purge(MetachunkListCluster* freelists) {
+
+  assert_lock_strong(MetaspaceExpand_lock);
 
-  if (!_rs.is_reserved()) {
-    return false;
+  // First find out if all areas are empty. Since free chunks are merged back up to root
+  // chunk size, if all chunks in this node are free root chunks we are good to go.
+  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea ++) {
+    const RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
+    const Metachunk* c = ra->first_chunk();
+    if (c != NULL) {
+      if (!(c->is_root_chunk() && c->is_free())) {
+        return false;
+      }
+    }
   }
 
-  // These are necessary restriction to make sure that the virtual space always
-  // grows in steps of Metaspace::commit_alignment(). If both base and size are
-  // aligned only the middle alignment of the VirtualSpace is used.
-  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
-  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
+  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": purging.", _node_id, p2i(_base));
 
-  // ReservedSpaces marked as special will have the entire memory
-  // pre-committed. Setting a committed size will make sure that
-  // committed_size and actual_committed_size agrees.
-  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
-
-  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
-      Metaspace::commit_alignment());
-  if (result) {
-    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
-        "Checking that the pre-committed memory was registered by the VirtualSpace");
-
-    set_top((MetaWord*)virtual_space()->low());
+  // Okay, we can purge. Before we can do this, we need to remove all chunks from the freelists.
+  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea ++) {
+    RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
+    Metachunk* c = ra->first_chunk();
+    if (c != NULL) {
+      log_trace(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": removing chunk " METACHUNK_FULL_FORMAT ".",
+                           _node_id, p2i(_base), METACHUNK_FULL_FORMAT_ARGS(c));
+      assert(c->is_free() && c->is_root_chunk(), "Sanity");
+      freelists->remove(c);
+    }
   }
 
-  // Initialize Occupancy Map.
-  const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
-  _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
+  // Now, delete the node, then right away return since this object is invalid.
+  delete this;
 
-  return result;
+  return true;
+
 }
 
-void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
-  size_t used_words = used_words_in_vs();
-  size_t commit_words = committed_words();
-  size_t res_words = reserved_words();
-  VirtualSpace* vs = virtual_space();
+
+void VirtualSpaceNode::print_on(outputStream* st) const {
 
-  st->print("node @" PTR_FORMAT ": ", p2i(this));
+  size_t scale = K;
+
+  st->print("id: %d, base " PTR_FORMAT ": ", _node_id, p2i(base()));
   st->print("reserved=");
-  print_scaled_words(st, res_words, scale);
+  print_scaled_words(st, word_size(), scale);
   st->print(", committed=");
-  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
+  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
   st->print(", used=");
-  print_scaled_words_and_percentage(st, used_words, res_words, scale);
+  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);
+
   st->cr();
-  st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
-      PTR_FORMAT ", " PTR_FORMAT ")",
-      p2i(bottom()), p2i(top()), p2i(end()),
-      p2i(vs->high_boundary()));
+
+  _root_chunk_area_lut.print_on(st);
+  _commit_mask.print_on(st);
+
+}
+
+// Returns size, in words, of committed space in this node alone.
+// Note: iterates over commit mask and hence may be a tad expensive on large nodes.
+size_t VirtualSpaceNode::committed_words() const {
+  return _commit_mask.get_committed_size();
 }
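
// Editor's note: a self-contained sketch (not part of this patch) of how a
// commit mask can derive committed size: count set bits, one bit per granule
// (__builtin_popcountll is a GCC/Clang builtin).
#include <cstddef>
#include <cstdint>
static size_t committed_words_from_mask(const uint64_t* mask, size_t mask_len,
                                        size_t granule_words) {
  size_t bits = 0;
  for (size_t i = 0; i < mask_len; i++) {
    bits += (size_t)__builtin_popcountll(mask[i]);
  }
  return bits * granule_words;  // each set bit == one committed granule
}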
 
 #ifdef ASSERT
-void VirtualSpaceNode::mangle() {
-  size_t word_size = capacity_words_in_vs();
-  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
-}
-#endif // ASSERT
+// Verify counters and basic structure. Slow mode: verify all chunks in depth
+void VirtualSpaceNode::verify(bool slow) const {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  assert(base() != NULL, "Invalid base");
+  assert(base() == (MetaWord*)_rs.base() &&
+         word_size() == _rs.size() / BytesPerWord,
+         "Sanity");
+  assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE);
+  assert(used_words() <= word_size(), "Sanity");
 
-void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
-  assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
-#ifdef ASSERT
-  verify(false);
-  EVERY_NTH(VerifyMetaspaceInterval)
-    verify(true);
-  END_EVERY_NTH
+  // Since we only ever hand out root chunks from a vsnode, top should always be aligned
+  // to root chunk size.
+  assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE);
+
+  _commit_mask.verify(slow);
+  assert(committed_words() <= word_size(), "Sanity");
+  assert_is_aligned(committed_words(), Settings::commit_granule_words());
+  _root_chunk_area_lut.verify(slow);
+
+}
+
 #endif
-  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
-    ChunkIndex index = (ChunkIndex)i;
-    size_t chunk_size = chunk_manager->size_by_index(index);
 
-    while (free_words_in_vs() >= chunk_size) {
-      Metachunk* chunk = get_chunk_vs(chunk_size);
-      // Chunk will be allocated aligned, so allocation may require
-      // additional padding chunks. That may cause above allocation to
-      // fail. Just ignore the failed allocation and continue with the
-      // next smaller chunk size. As the VirtualSpaceNode comitted
-      // size should be a multiple of the smallest chunk size, we
-      // should always be able to fill the VirtualSpace completely.
-      if (chunk == NULL) {
-        break;
-      }
-      chunk_manager->return_single_chunk(chunk);
-    }
-  }
-  assert(free_words_in_vs() == 0, "should be empty now");
-}
 
 } // namespace metaspace
 
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,139 +26,247 @@
 #ifndef SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
 #define SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
 
+
+#include <memory/metaspace/settings.hpp>
+#include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/commitMask.hpp"
+#include "memory/metaspace/rootChunkArea.hpp"
 #include "memory/virtualspace.hpp"
 #include "memory/memRegion.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/bitMap.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 class outputStream;
 
 namespace metaspace {
 
-class Metachunk;
-class ChunkManager;
-class OccupancyMap;
+class CommitLimiter;
+class MetachunkListCluster;
 
-// A VirtualSpaceList node.
+// VirtualSpaceNode manages a single address range of the Metaspace.
+//
+// That address range may contain interleaved committed and uncommitted
+// regions. It keeps track of which regions have been committed and offers
+// functions to commit and uncommit regions.
+//
+// It allocates and hands out memory ranges, starting at the bottom.
+//
+// Address range must be aligned to root chunk size.
+//
 class VirtualSpaceNode : public CHeapObj<mtClass> {
-  friend class VirtualSpaceList;
 
   // Link to next VirtualSpaceNode
   VirtualSpaceNode* _next;
 
-  // Whether this node is contained in class or metaspace.
-  const bool _is_class;
+  ReservedSpace _rs;
+
+  // Start pointer of the area.
+  MetaWord* const _base;
+
+  // Size, in words, of the whole node
+  const size_t _word_size;
+
+  // Size, in words, of the range of this node which has been handed out in
+  // the form of chunks.
+  size_t _used_words;
+
+  // The bitmap describing the commit state of the region:
+  // Each bit covers a region of 64K (see Settings::commit_granule_bytes()).
+  CommitMask _commit_mask;
+
+  // An array/LUT of RootChunkArea objects. Each one describes
+  // fragmentation inside a root chunk.
+  RootChunkAreaLUT _root_chunk_area_lut;
 
-  // total in the VirtualSpace
-  ReservedSpace _rs;
-  VirtualSpace _virtual_space;
-  MetaWord* _top;
-  // count of chunks contained in this VirtualSpace
-  uintx _container_count;
+  // Limiter object to consult before expanding the committed size of this node.
+  CommitLimiter* const _commit_limiter;
+
+  // Points to external size counters which we increase/decrease when we commit/uncommit
+  // space in this node.
+  SizeCounter* const _total_reserved_words_counter;
+  SizeCounter* const _total_committed_words_counter;
 
-  OccupancyMap* _occupancy_map;
+  // For debug and tracing purposes
+  const int _node_id;
+
+  /// committing, uncommitting ///
 
-  // Convenience functions to access the _virtual_space
-  char* low()  const { return virtual_space()->low(); }
-  char* high() const { return virtual_space()->high(); }
-  char* low_boundary()  const { return virtual_space()->low_boundary(); }
-  char* high_boundary() const { return virtual_space()->high_boundary(); }
+  // Given a pointer into this node, calculate the start of the commit granule
+  // the pointer points into.
+  MetaWord* calc_start_of_granule(MetaWord* p) const {
+    DEBUG_ONLY(check_pointer(p));
+    return align_down(p, Settings::commit_granule_bytes());
+  }
 
-  // The first Metachunk will be allocated at the bottom of the
-  // VirtualSpace
-  Metachunk* first_chunk() { return (Metachunk*) bottom(); }
+  // Given an address range, ensure it is committed.
+  //
+  // The range has to be aligned to granule size.
+  //
+  // The function will:
+  // - check how many granules in that region are uncommitted; if all are
+  //    committed, it returns true immediately.
+  // - check whether committing those uncommitted granules would bring us over
+  //    the commit limit (GC threshold, MaxMetaspaceSize); if so, it returns false.
+  // - commit the memory.
+  // - mark the range as committed in the commit mask.
+  //
+  // Returns true on success, false if a commit limit was hit.
+  bool commit_range(MetaWord* p, size_t word_size);
 
-  // Committed but unused space in the virtual space
-  size_t free_words_in_vs() const;
-
-  // True if this node belongs to class metaspace.
-  bool is_class() const { return _is_class; }
+  //// creation ////
 
-  // Helper function for take_from_committed: allocate padding chunks
-  // until top is at the given address.
-  void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
+  // Create a new empty node spanning the given reserved space.
+  VirtualSpaceNode(int node_id,
+                   ReservedSpace rs,
+                   CommitLimiter* limiter,
+                   SizeCounter* reserve_counter,
+                   SizeCounter* commit_counter);
+
+  MetaWord* base() const        { return _base; }
+
+public:
 
- public:
+  // Create a node of a given size
+  static VirtualSpaceNode* create_node(int node_id,
+                                       size_t word_size,
+                                       CommitLimiter* limiter,
+                                       SizeCounter* reserve_words_counter,
+                                       SizeCounter* commit_words_counter);
 
-  VirtualSpaceNode(bool is_class, size_t byte_size);
-  VirtualSpaceNode(bool is_class, ReservedSpace rs) :
-    _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
+  // Create a node over an existing space
+  static VirtualSpaceNode* create_node(int node_id,
+                                       ReservedSpace rs,
+                                       CommitLimiter* limiter,
+                                       SizeCounter* reserve_words_counter,
+                                       SizeCounter* commit_words_counter);
+
   ~VirtualSpaceNode();
 
-  // Convenience functions for logical bottom and end
-  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
-  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
-  const OccupancyMap* occupancy_map() const { return _occupancy_map; }
-  OccupancyMap* occupancy_map() { return _occupancy_map; }
+  // Reserved size of the whole node.
+  size_t word_size() const      { return _word_size; }
+
+  //// Chunk allocation, splitting, merging /////
 
-  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
+  // Allocate a root chunk from this node. Will fail and return NULL
+  // if the node is full.
+  // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
+  // Hence, before using this chunk, it must be committed.
+  // Also, no limits are checked, since no committing takes place.
+  Metachunk* allocate_root_chunk();
 
-  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
-  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
-
-  bool is_pre_committed() const { return _virtual_space.special(); }
+  // Given a chunk c, split it recursively until you get a chunk of the given target_level.
+  //
+  // The original chunk must not be part of a freelist.
+  //
+  // Returns a pointer to the resulting chunk; the split-off chunks are added
+  //  to the freelists as free chunks.
+  //
+  // Returns NULL if the chunk cannot be split even once.
+  Metachunk* split(chklvl_t target_level, Metachunk* c, MetachunkListCluster* freelists);
 
-  // address of next available space in _virtual_space;
-  // Accessors
-  VirtualSpaceNode* next() { return _next; }
-  void set_next(VirtualSpaceNode* v) { _next = v; }
-
-  void set_top(MetaWord* v) { _top = v; }
-
-  // Accessors
-  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
+  // Given a chunk, attempt to merge it recursively with its neighboring chunks.
+  //
+  // If successful (merged at least once), returns address of
+  // the merged chunk; NULL otherwise.
+  //
+  // The merged chunks are removed from the freelists.
+  //
+  // !!! Please note that if this method returns a non-NULL value, the
+  // original chunk will be invalid and should not be accessed anymore! !!!
+  Metachunk* merge(Metachunk* c, MetachunkListCluster* freelists);
 
-  // Returns true if "word_size" is available in the VirtualSpace
-  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
+  // Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
+  // enlarge it in place by claiming its trailing buddy.
+  //
+  // This will only work if c is the leader of the buddy pair and the trailing buddy is free.
+  //
+  // If successful, the follower chunk is removed from the freelists and the
+  // leader chunk c doubles in size (its level decreases by one).
+  //
+  // Returns true on success, false otherwise.
+  bool attempt_enlarge_chunk(Metachunk* c, MetachunkListCluster* freelists);
 
-  MetaWord* top() const { return _top; }
-  void inc_top(size_t word_size) { _top += word_size; }
+  // Attempts to purge the node:
+  //
+  // If all chunks living in this node are free, they are all removed from
+  //   their freelists and the node is deleted.
+  //
+  // Returns true if the node has been deleted, false if not.
+  // !! If this returns true, do not access the node from this point on. !!
+  bool attempt_purge(MetachunkListCluster* freelists);
 
-  uintx container_count() { return _container_count; }
-  void inc_container_count();
-  void dec_container_count();
+  // Attempts to uncommit free areas according to the rules set in settings.
+  // Returns number of words uncommitted.
+  size_t uncommit_free_areas();
 
-  // used and capacity in this single entry in the list
-  size_t used_words_in_vs() const;
-  size_t capacity_words_in_vs() const;
+  /// misc /////
 
-  bool initialize();
-
-  // get space from the virtual space
-  Metachunk* take_from_committed(size_t chunk_word_size);
+  // Returns size, in words, of the used space in this node alone.
+  // (Notes:
+  //  - This is the space handed out to the ChunkManager, so it is "used" from the viewpoint of this node,
+  //    but not necessarily used for Metadata.
+  //  - This may or may not be committed memory.)
+  size_t used_words() const             { return _used_words; }
 
-  // Allocate a chunk from the virtual space and return it.
-  Metachunk* get_chunk_vs(size_t chunk_word_size);
+  // Returns size, in words, of how much space is left in this node alone.
+  size_t free_words() const             { return _word_size - _used_words; }
 
-  // Expands the committed space by at least min_words words.
-  bool expand_by(size_t min_words, size_t preferred_words);
+  // Returns size, in words, of committed space in this node alone.
+  // Note: iterates over commit mask and hence may be a tad expensive on large nodes.
+  size_t committed_words() const;
 
-  // In preparation for deleting this node, remove all the chunks
-  // in the node from any freelist.
-  void purge(ChunkManager* chunk_manager);
+  //// Committing/uncommitting memory /////
 
-  // If an allocation doesn't fit in the current node a new node is created.
-  // Allocate chunks out of the remaining committed space in this node
-  // to avoid wasting that memory.
-  // This always adds up because all the chunk sizes are multiples of
-  // the smallest chunk size.
-  void retire(ChunkManager* chunk_manager);
+  // Given an address range, ensure it is committed.
+  //
+  // The range does not have to be aligned to granule size. However, the function will always commit
+  // whole granules.
+  //
+  // The function will:
+  // - check how many granules in that region are uncommitted; if all are
+  //    committed, it returns true immediately.
+  // - check whether committing those uncommitted granules would bring us over
+  //    the commit limit (GC threshold, MaxMetaspaceSize); if so, it returns false.
+  // - commit the memory.
+  // - mark the range as committed in the commit mask.
+  //
+  // Returns true on success, false if a commit limit was hit.
+  bool ensure_range_is_committed(MetaWord* p, size_t word_size);
+
+  // Given an address range (which has to be aligned to commit granule size):
+  //  - uncommit it
+  //  - mark it as uncommitted in the commit mask
+  void uncommit_range(MetaWord* p, size_t word_size);
 
-  void print_on(outputStream* st) const                 { print_on(st, K); }
-  void print_on(outputStream* st, size_t scale) const;
-  void print_map(outputStream* st, bool is_class) const;
+  //// List stuff ////
+  VirtualSpaceNode* next() const        { return _next; }
+  void set_next(VirtualSpaceNode* vsn)  { _next = vsn; }
+
+
+  /// Debug stuff ////
+
+  // Print a description about this node.
+  void print_on(outputStream* st) const;
 
-  // Debug support
-  DEBUG_ONLY(void mangle();)
-  // Verify counters and basic structure. Slow mode: verify all chunks in depth and occupancy map.
-  DEBUG_ONLY(void verify(bool slow);)
-  // Verify that all free chunks in this node are ideally merged
-  // (there should not be multiple small chunks where a large chunk could exist.)
-  DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
+  // Returns true if p points into the used (handed-out) area of this node.
+  bool contains(const MetaWord* p) const {
+    return p >= _base && p < _base + _used_words;
+  }
+
+#ifdef ASSERT
+  void check_pointer(const MetaWord* p) const {
+    assert(contains(p), "invalid pointer");
+  }
+  // Verify counters and basic structure. Slow mode: verify all chunks in depth.
+  void verify(bool slow) const;
+#endif
 
 };
 
+
 } // namespace metaspace
 
 #endif // SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
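The split/merge/attempt_enlarge_chunk contract declared above follows classic buddy-allocator rules: a chunk at level L spans MAX_CHUNK_BYTE_SIZE >> L bytes, a chunk's buddy differs from it only in the address bit corresponding to the chunk's own size, and only the leader (the lower-addressed half of a pair) can absorb its trailing buddy. A self-contained sketch of that level arithmetic, assuming a 4M root chunk (none of the names below are HotSpot code):

    #include <cassert>
    #include <cstddef>

    // Levels grow as chunks shrink: level 0 is the root chunk, each level
    // below it halves the size.
    typedef int chklvl_t;
    static const size_t MAX_CHUNK_BYTE_SIZE = 4 * 1024 * 1024; // assumed root size

    static size_t chunk_size_for_level(chklvl_t level) {
      return MAX_CHUNK_BYTE_SIZE >> level;
    }

    // A chunk is the "leader" of its buddy pair if its offset within the root
    // chunk is an even multiple of its own size.
    static bool is_leader(size_t offset_in_root, chklvl_t level) {
      return (offset_in_root / chunk_size_for_level(level)) % 2 == 0;
    }

    // The buddy lives at the offset obtained by flipping the address bit that
    // corresponds to the chunk's own size.
    static size_t buddy_offset(size_t offset_in_root, chklvl_t level) {
      return offset_in_root ^ chunk_size_for_level(level);
    }

    int main() {
      // A level-2 chunk (1M with a 4M root) starting at offset 2M:
      assert(chunk_size_for_level(2) == 1024 * 1024);
      assert(is_leader(2 * 1024 * 1024, 2));             // even multiple: leader
      assert(buddy_offset(2 * 1024 * 1024, 2) == 3 * 1024 * 1024);
      return 0;
    }

The XOR trick is what keeps merging cheap: locating a buddy never requires scanning, only flipping one bit of the chunk's offset.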
--- a/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 #define SHARE_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 
+#include "utilities/globalDefinitions.hpp"
 
 class MetaspaceChunkFreeListSummary {
   size_t _num_specialized_chunks;
--- a/src/hotspot/share/memory/metaspaceCounters.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspaceCounters.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -98,15 +98,15 @@
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
 
 size_t CompressedClassSpaceCounters::used() {
-  return MetaspaceUtils::used_bytes(Metaspace::ClassType);
+  return MetaspaceUtils::used_bytes(metaspace::ClassType);
 }
 
 size_t CompressedClassSpaceCounters::capacity() {
-  return MetaspaceUtils::committed_bytes(Metaspace::ClassType);
+  return MetaspaceUtils::committed_bytes(metaspace::ClassType);
 }
 
 size_t CompressedClassSpaceCounters::max_capacity() {
-  return MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
+  return MetaspaceUtils::reserved_bytes(metaspace::ClassType);
 }
 
 void CompressedClassSpaceCounters::update_performance_counters() {
--- a/src/hotspot/share/memory/metaspaceGCThresholdUpdater.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP
-#define SHARE_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/debug.hpp"
-
-class MetaspaceGCThresholdUpdater : public AllStatic {
- public:
-  enum Type {
-    ComputeNewSize,
-    ExpandAndAllocate,
-    Last
-  };
-
-  static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) {
-    switch (updater) {
-      case ComputeNewSize:
-        return "compute_new_size";
-      case ExpandAndAllocate:
-        return "expand_and_allocate";
-      default:
-        assert(false, "Got bad updater: %d", (int) updater);
-        return NULL;
-    };
-  }
-};
-
-#endif // SHARE_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP
--- a/src/hotspot/share/memory/metaspaceTracer.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspaceTracer.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -43,14 +43,14 @@
 void MetaspaceTracer::report_metaspace_allocation_failure(ClassLoaderData *cld,
                                                           size_t word_size,
                                                           MetaspaceObj::Type objtype,
-                                                          Metaspace::MetadataType mdtype) const {
+                                                          metaspace::MetadataType mdtype) const {
   send_allocation_failure_event<EventMetaspaceAllocationFailure>(cld, word_size, objtype, mdtype);
 }
 
 void MetaspaceTracer::report_metadata_oom(ClassLoaderData *cld,
                                          size_t word_size,
                                          MetaspaceObj::Type objtype,
-                                         Metaspace::MetadataType mdtype) const {
+                                         metaspace::MetadataType mdtype) const {
   send_allocation_failure_event<EventMetaspaceOOM>(cld, word_size, objtype, mdtype);
 }
 
@@ -58,7 +58,7 @@
 void MetaspaceTracer::send_allocation_failure_event(ClassLoaderData *cld,
                                                     size_t word_size,
                                                     MetaspaceObj::Type objtype,
-                                                    Metaspace::MetadataType mdtype) const {
+                                                    metaspace::MetadataType mdtype) const {
   E event;
   if (event.should_commit()) {
     event.set_classLoader(cld);
--- a/src/hotspot/share/memory/metaspaceTracer.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/metaspaceTracer.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -27,7 +27,7 @@
 
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
-#include "memory/metaspaceGCThresholdUpdater.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 
 class ClassLoaderData;
 
@@ -36,7 +36,7 @@
   void send_allocation_failure_event(ClassLoaderData *cld,
                                      size_t word_size,
                                      MetaspaceObj::Type objtype,
-                                     Metaspace::MetadataType mdtype) const;
+                                     metaspace::MetadataType mdtype) const;
  public:
   void report_gc_threshold(size_t old_val,
                            size_t new_val,
@@ -44,11 +44,11 @@
   void report_metaspace_allocation_failure(ClassLoaderData *cld,
                                            size_t word_size,
                                            MetaspaceObj::Type objtype,
-                                           Metaspace::MetadataType mdtype) const;
+                                           metaspace::MetadataType mdtype) const;
   void report_metadata_oom(ClassLoaderData *cld,
                            size_t word_size,
                            MetaspaceObj::Type objtype,
-                           Metaspace::MetadataType mdtype) const;
+                           metaspace::MetadataType mdtype) const;
 
 };
 
--- a/src/hotspot/share/memory/universe.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1114,7 +1114,7 @@
 #endif
   if (should_verify_subset(Verify_MetaspaceUtils)) {
     log_debug(gc, verify)("MetaspaceUtils");
-    MetaspaceUtils::verify_free_chunks();
+    DEBUG_ONLY(MetaspaceUtils::verify(true);)
   }
   if (should_verify_subset(Verify_JNIHandles)) {
     log_debug(gc, verify)("JNIHandles");
--- a/src/hotspot/share/runtime/globals.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/runtime/globals.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -1609,6 +1609,12 @@
           "class pointers are used")                                        \
           range(1*M, 3*G)                                                   \
                                                                             \
+  product(ccstr, MetaspaceReclaimStrategy, "balanced",                      \
+          "options: balanced, aggressive, none")                            \
+                                                                            \
+  product(bool, MetaspaceEnlargeChunksInPlace, true,                        \
+          "Metapace chunks are enlarged in place.")                         \
+                                                                            \
   manageable(uintx, MinHeapFreeRatio, 40,                                   \
           "The minimum percentage of heap free after GC to avoid expansion."\
           " For most GCs this applies to the old generation. In G1 and"     \
--- a/src/hotspot/share/runtime/os.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/runtime/os.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -416,6 +416,8 @@
   static void    make_polling_page_readable();
 
   // Check if pointer points to readable memory (by 4-byte read access)
+  // !! Unreliable before VM initialization! Use CanUseSafeFetch32() to test
+  //    if this function is reliable !!
   static bool    is_readable_pointer(const void* p);
   static bool    is_readable_range(const void* from, const void* to);
 
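A minimal sketch of the guarded usage pattern the new comment asks for (an illustrative fragment that assumes HotSpot-internal headers; dump_if_readable is a made-up name):

    // SafeFetch stubs are only generated during VM initialization, so check
    // for them before trusting os::is_readable_pointer().
    static void dump_if_readable(const void* p) {
      if (CanUseSafeFetch32() && os::is_readable_pointer(p)) {
        // p can be probed without crashing; safe for diagnostic printing.
      }
    }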
--- a/src/hotspot/share/runtime/vmOperations.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/runtime/vmOperations.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -33,6 +33,7 @@
 #include "logging/logStream.hpp"
 #include "logging/logConfiguration.hpp"
 #include "memory/heapInspection.hpp"
+#include "memory/metaspace/metaspaceReport.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/symbol.hpp"
@@ -209,7 +210,7 @@
 }
 
 void VM_PrintMetadata::doit() {
-  MetaspaceUtils::print_report(_out, _scale, _flags);
+  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
 }
 
 VM_FindDeadlocks::~VM_FindDeadlocks() {
--- a/src/hotspot/share/services/memReporter.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/services/memReporter.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -200,35 +200,35 @@
         amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
     } else if (flag == mtClass) {
       // Metadata information
-      report_metadata(Metaspace::NonClassType);
+      report_metadata(metaspace::NonClassType);
       if (Metaspace::using_class_space()) {
-        report_metadata(Metaspace::ClassType);
+        report_metadata(metaspace::ClassType);
       }
     }
     out->print_cr(" ");
   }
 }
 
-void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
-  assert(type == Metaspace::NonClassType || type == Metaspace::ClassType,
-    "Invalid metadata type");
-  const char* name = (type == Metaspace::NonClassType) ?
-    "Metadata:   " : "Class space:";
+void MemSummaryReporter::report_metadata(metaspace::MetadataType mdType) const {
+  DEBUG_ONLY(metaspace::check_valid_mdtype(mdType));
+  const char* const name = metaspace::describe_mdtype(mdType);
 
   outputStream* out = output();
   const char* scale = current_scale();
-  size_t committed   = MetaspaceUtils::committed_bytes(type);
-  size_t used = MetaspaceUtils::used_bytes(type);
-  size_t free = (MetaspaceUtils::capacity_bytes(type) - used)
-              + MetaspaceUtils::free_chunks_total_bytes(type)
-              + MetaspaceUtils::free_in_vs_bytes(type);
+  size_t committed   = MetaspaceUtils::committed_bytes(mdType);
+  size_t used = MetaspaceUtils::used_bytes(mdType);
+  // TODO: Think this through. What does "free" mean in this context?
+  // The old calculation was:
+  //   (MetaspaceUtils::capacity_bytes(type) - used)
+  //    + MetaspaceUtils::free_chunks_total_bytes(type)
+  //    + MetaspaceUtils::free_in_vs_bytes(type);
+  size_t free = 0;
 
   assert(committed >= used + free, "Sanity");
   size_t waste = committed - (used + free);
 
   out->print_cr("%27s (  %s)", " ", name);
   out->print("%27s (    ", " ");
-  print_total(MetaspaceUtils::reserved_bytes(type), committed);
+  print_total(MetaspaceUtils::reserved_bytes(mdType), committed);
   out->print_cr(")");
   out->print_cr("%27s (    used=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(used), scale);
   out->print_cr("%27s (    free=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(free), scale);
@@ -593,43 +593,43 @@
 
 void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceSnapshot* current_ms,
                                                   const MetaspaceSnapshot* early_ms) const {
-  print_metaspace_diff(Metaspace::NonClassType, current_ms, early_ms);
+  print_metaspace_diff(metaspace::NonClassType, current_ms, early_ms);
   if (Metaspace::using_class_space()) {
-    print_metaspace_diff(Metaspace::ClassType, current_ms, early_ms);
+    print_metaspace_diff(metaspace::ClassType, current_ms, early_ms);
   }
 }
 
-void MemSummaryDiffReporter::print_metaspace_diff(Metaspace::MetadataType type,
+void MemSummaryDiffReporter::print_metaspace_diff(metaspace::MetadataType mdType,
                                                   const MetaspaceSnapshot* current_ms,
                                                   const MetaspaceSnapshot* early_ms) const {
-  const char* name = (type == Metaspace::NonClassType) ?
-    "Metadata:   " : "Class space:";
+  DEBUG_ONLY(metaspace::check_valid_mdtype(mdType));
+  const char* const name = metaspace::describe_mdtype(mdType);
 
   outputStream* out = output();
   const char* scale = current_scale();
 
   out->print_cr("%27s (  %s)", " ", name);
   out->print("%27s (    ", " ");
-  print_virtual_memory_diff(current_ms->reserved_in_bytes(type),
-                            current_ms->committed_in_bytes(type),
-                            early_ms->reserved_in_bytes(type),
-                            early_ms->committed_in_bytes(type));
+  print_virtual_memory_diff(current_ms->reserved_in_bytes(mdType),
+                            current_ms->committed_in_bytes(mdType),
+                            early_ms->reserved_in_bytes(mdType),
+                            early_ms->committed_in_bytes(mdType));
   out->print_cr(")");
 
-  long diff_used = diff_in_current_scale(current_ms->used_in_bytes(type),
-                                         early_ms->used_in_bytes(type));
-  long diff_free = diff_in_current_scale(current_ms->free_in_bytes(type),
-                                         early_ms->free_in_bytes(type));
+  long diff_used = diff_in_current_scale(current_ms->used_in_bytes(mdType),
+                                         early_ms->used_in_bytes(mdType));
+  long diff_free = diff_in_current_scale(current_ms->free_in_bytes(mdType),
+                                         early_ms->free_in_bytes(mdType));
 
-  size_t current_waste = current_ms->committed_in_bytes(type)
-    - (current_ms->used_in_bytes(type) + current_ms->free_in_bytes(type));
-  size_t early_waste = early_ms->committed_in_bytes(type)
-    - (early_ms->used_in_bytes(type) + early_ms->free_in_bytes(type));
+  size_t current_waste = current_ms->committed_in_bytes(mdType)
+    - (current_ms->used_in_bytes(mdType) + current_ms->free_in_bytes(mdType));
+  size_t early_waste = early_ms->committed_in_bytes(mdType)
+    - (early_ms->used_in_bytes(mdType) + early_ms->free_in_bytes(mdType));
   long diff_waste = diff_in_current_scale(current_waste, early_waste);
 
   // Diff used
   out->print("%27s (    used=" SIZE_FORMAT "%s", " ",
-    amount_in_current_scale(current_ms->used_in_bytes(type)), scale);
+    amount_in_current_scale(current_ms->used_in_bytes(mdType)), scale);
   if (diff_used != 0) {
     out->print(" %+ld%s", diff_used, scale);
   }
@@ -637,7 +637,7 @@
 
   // Diff free
   out->print("%27s (    free=" SIZE_FORMAT "%s", " ",
-    amount_in_current_scale(current_ms->free_in_bytes(type)), scale);
+    amount_in_current_scale(current_ms->free_in_bytes(mdType)), scale);
   if (diff_free != 0) {
     out->print(" %+ld%s", diff_free, scale);
   }
@@ -647,7 +647,7 @@
   // Diff waste
   out->print("%27s (    waste=" SIZE_FORMAT "%s =%2.2f%%", " ",
     amount_in_current_scale(current_waste), scale,
-    ((float)current_waste * 100) / current_ms->committed_in_bytes(type));
+    ((float)current_waste * 100) / current_ms->committed_in_bytes(mdType));
   if (diff_waste != 0) {
     out->print(" %+ld%s", diff_waste, scale);
   }
--- a/src/hotspot/share/services/memReporter.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/services/memReporter.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -27,7 +27,7 @@
 
 #if INCLUDE_NMT
 
-#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "oops/instanceKlass.hpp"
 #include "services/memBaseline.hpp"
 #include "services/nmtCommon.hpp"
@@ -114,7 +114,7 @@
   void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
     VirtualMemory* virtual_memory);
 
-  void report_metadata(Metaspace::MetadataType type) const;
+  void report_metadata(metaspace::MetadataType type) const;
 };
 
 /*
@@ -189,7 +189,7 @@
 
   void print_metaspace_diff(const MetaspaceSnapshot* current_ms,
                             const MetaspaceSnapshot* early_ms) const;
-  void print_metaspace_diff(Metaspace::MetadataType type,
+  void print_metaspace_diff(metaspace::MetadataType type,
     const MetaspaceSnapshot* current_ms, const MetaspaceSnapshot* early_ms) const;
 };
 
--- a/src/hotspot/share/services/memoryPool.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/services/memoryPool.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "memory/metaspace.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -211,10 +212,10 @@
   MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
 
 size_t CompressedKlassSpacePool::used_in_bytes() {
-  return MetaspaceUtils::used_bytes(Metaspace::ClassType);
+  return MetaspaceUtils::used_bytes(metaspace::ClassType);
 }
 
 MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
-  size_t committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
+  size_t committed = MetaspaceUtils::committed_bytes(metaspace::ClassType);
   return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -602,8 +602,8 @@
 
 // Metaspace Support
 MetaspaceSnapshot::MetaspaceSnapshot() {
-  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index ++) {
-    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
+  for (int index = (int)metaspace::ClassType; index < (int)metaspace::MetadataTypeCount; index ++) {
+    metaspace::MetadataType type = (metaspace::MetadataType)index;
     assert_valid_metadata_type(type);
     _reserved_in_bytes[type]  = 0;
     _committed_in_bytes[type] = 0;
@@ -612,22 +612,22 @@
   }
 }
 
-void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
+void MetaspaceSnapshot::snapshot(metaspace::MetadataType type, MetaspaceSnapshot& mss) {
   assert_valid_metadata_type(type);
 
   mss._reserved_in_bytes[type]   = MetaspaceUtils::reserved_bytes(type);
   mss._committed_in_bytes[type]  = MetaspaceUtils::committed_bytes(type);
   mss._used_in_bytes[type]       = MetaspaceUtils::used_bytes(type);
 
-  size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
-                       + MetaspaceUtils::free_chunks_total_bytes(type)
-                       + MetaspaceUtils::free_in_vs_bytes(type);
+  // TODO: Fix. The old calculation was:
+  //   (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
+  //    + MetaspaceUtils::free_chunks_total_bytes(type)
+  //    + MetaspaceUtils::free_in_vs_bytes(type);
+  size_t free_in_bytes = 0;
   mss._free_in_bytes[type] = free_in_bytes;
 }
 
 void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
-  snapshot(Metaspace::ClassType, mss);
+  // Non-class metadata always exists; class space only with compressed class pointers.
+  snapshot(metaspace::NonClassType, mss);
   if (Metaspace::using_class_space()) {
-    snapshot(Metaspace::NonClassType, mss);
+    snapshot(metaspace::ClassType, mss);
   }
 }
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -28,7 +28,7 @@
 #if INCLUDE_NMT
 
 #include "memory/allocation.hpp"
-#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceEnums.hpp"
 #include "services/allocationSite.hpp"
 #include "services/nmtCommon.hpp"
 #include "utilities/linkedlist.hpp"
@@ -420,25 +420,25 @@
 
 class MetaspaceSnapshot : public ResourceObj {
 private:
-  size_t  _reserved_in_bytes[Metaspace::MetadataTypeCount];
-  size_t  _committed_in_bytes[Metaspace::MetadataTypeCount];
-  size_t  _used_in_bytes[Metaspace::MetadataTypeCount];
-  size_t  _free_in_bytes[Metaspace::MetadataTypeCount];
+  size_t  _reserved_in_bytes[metaspace::MetadataTypeCount];
+  size_t  _committed_in_bytes[metaspace::MetadataTypeCount];
+  size_t  _used_in_bytes[metaspace::MetadataTypeCount];
+  size_t  _free_in_bytes[metaspace::MetadataTypeCount];
 
 public:
   MetaspaceSnapshot();
-  size_t reserved_in_bytes(Metaspace::MetadataType type)   const { assert_valid_metadata_type(type); return _reserved_in_bytes[type]; }
-  size_t committed_in_bytes(Metaspace::MetadataType type)  const { assert_valid_metadata_type(type); return _committed_in_bytes[type]; }
-  size_t used_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _used_in_bytes[type]; }
-  size_t free_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _free_in_bytes[type]; }
+  size_t reserved_in_bytes(metaspace::MetadataType type)   const { assert_valid_metadata_type(type); return _reserved_in_bytes[type]; }
+  size_t committed_in_bytes(metaspace::MetadataType type)  const { assert_valid_metadata_type(type); return _committed_in_bytes[type]; }
+  size_t used_in_bytes(metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _used_in_bytes[type]; }
+  size_t free_in_bytes(metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _free_in_bytes[type]; }
 
   static void snapshot(MetaspaceSnapshot& s);
 
 private:
-  static void snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& s);
+  static void snapshot(metaspace::MetadataType type, MetaspaceSnapshot& s);
 
-  static void assert_valid_metadata_type(Metaspace::MetadataType type) {
-    assert(type == Metaspace::ClassType || type == Metaspace::NonClassType,
+  static void assert_valid_metadata_type(metaspace::MetadataType type) {
+    assert(type == metaspace::ClassType || type == metaspace::NonClassType,
       "Invalid metadata type");
   }
 };
--- a/src/hotspot/share/utilities/bitMap.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/utilities/bitMap.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -609,7 +609,7 @@
 // then modifications in and to the left of the _bit_ being
 // currently sampled will not be seen. Note also that the
 // interval [leftOffset, rightOffset) is right open.
-bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) {
+bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) const {
   verify_range(leftOffset, rightOffset);
 
   idx_t startIndex = word_index(leftOffset);
--- a/src/hotspot/share/utilities/bitMap.hpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/src/hotspot/share/utilities/bitMap.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -195,6 +195,7 @@
     return calc_size_in_words(size_in_bits) * BytesPerWord;
   }
 
+  // Size, in number of bits, of this map.
   idx_t size() const          { return _size; }
   idx_t size_in_words() const { return calc_size_in_words(size()); }
   idx_t size_in_bytes() const { return calc_size_in_bytes(size()); }
@@ -253,11 +254,11 @@
   void clear_large();
   inline void clear();
 
-  // Iteration support.  Returns "true" if the iteration completed, false
+  // Iteration support over the half-open range [leftIndex, rightIndex).  Returns "true" if the iteration completed, false
   // if the iteration terminated early (because the closure "blk" returned
   // false).
-  bool iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex);
-  bool iterate(BitMapClosure* blk) {
+  bool iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex) const;
+  bool iterate(BitMapClosure* blk) const {
     // call the version that takes an interval
     return iterate(blk, 0, size());
   }
@@ -279,6 +280,9 @@
   // aligned to bitsizeof(bm_word_t).
   idx_t get_next_one_offset_aligned_right(idx_t l_index, idx_t r_index) const;
 
+  // Returns the number of bits set between [l_index, r_index) in the bitmap.
+  idx_t count_one_bits(idx_t l_index, idx_t r_index) const;
+
   // Returns the number of bits set in the bitmap.
   idx_t count_one_bits() const;
 
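The range variant added above is what commit-mask style bookkeeping needs; for instance, VirtualSpaceNode::committed_words() is documented earlier in this change as iterating over the commit mask, which reduces to counting set bits in a range. A self-contained illustration of the semantics (a naive bit-by-bit loop, whereas the real BitMap works word-wise):

    #include <cstddef>
    #include <cstdint>

    // Counts the set bits in the half-open range [l_index, r_index) of a
    // bitmap stored as an array of 64-bit words - the contract of the new
    // BitMap::count_one_bits(l_index, r_index) overload.
    static size_t count_one_bits_naive(const uint64_t* map,
                                       size_t l_index, size_t r_index) {
      size_t count = 0;
      for (size_t i = l_index; i < r_index; i++) {
        if (map[i / 64] & (UINT64_C(1) << (i % 64))) {
          count++;
        }
      }
      return count;
    }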
--- a/test/hotspot/gtest/memory/test_chunkManager.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/metaspace/chunkManager.hpp"
-#include "memory/metaspace/metaspaceCommon.hpp"
-
-// The test function is only available in debug builds
-#ifdef ASSERT
-
-#include "unittest.hpp"
-
-using namespace metaspace;
-
-TEST(ChunkManager, list_index) {
-
-  // Test previous bug where a query for a humongous class metachunk,
-  // incorrectly matched the non-class medium metachunk size.
-  {
-    ChunkManager manager(true);
-
-    ASSERT_TRUE(MediumChunk > ClassMediumChunk) << "Precondition for test";
-
-    ChunkIndex index = manager.list_index(MediumChunk);
-
-    ASSERT_TRUE(index == HumongousIndex) <<
-        "Requested size is larger than ClassMediumChunk,"
-        " so should return HumongousIndex. Got index: " << index;
-  }
-
-  // Check the specified sizes as well.
-  {
-    ChunkManager manager(true);
-    ASSERT_TRUE(manager.list_index(ClassSpecializedChunk) == SpecializedIndex);
-    ASSERT_TRUE(manager.list_index(ClassSmallChunk) == SmallIndex);
-    ASSERT_TRUE(manager.list_index(ClassMediumChunk) == MediumIndex);
-    ASSERT_TRUE(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex);
-  }
-  {
-    ChunkManager manager(false);
-    ASSERT_TRUE(manager.list_index(SpecializedChunk) == SpecializedIndex);
-    ASSERT_TRUE(manager.list_index(SmallChunk) == SmallIndex);
-    ASSERT_TRUE(manager.list_index(MediumChunk) == MediumIndex);
-    ASSERT_TRUE(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex);
-  }
-
-}
-
-#endif // ASSERT
--- a/test/hotspot/gtest/memory/test_is_metaspace_obj.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/metaspace.hpp"
-#include "memory/metaspace/virtualSpaceList.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "unittest.hpp"
-
-using namespace metaspace;
-
-
-// Test the cheerful multitude of metaspace-contains-functions.
-class MetaspaceIsMetaspaceObjTest : public ::testing::Test {
-  Mutex* _lock;
-  ClassLoaderMetaspace* _ms;
-
-public:
-
-  MetaspaceIsMetaspaceObjTest() : _lock(NULL), _ms(NULL) {}
-
-  virtual void SetUp() {
-  }
-
-  virtual void TearDown() {
-    delete _ms;
-    delete _lock;
-  }
-
-  void do_test(Metaspace::MetadataType mdType) {
-    _lock = new Mutex(Monitor::native, "gtest-IsMetaspaceObjTest-lock", false, Monitor::_safepoint_check_never);
-    {
-      MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
-      _ms = new ClassLoaderMetaspace(_lock, Metaspace::StandardMetaspaceType);
-    }
-
-    const MetaspaceObj* p = (MetaspaceObj*) _ms->allocate(42, mdType);
-
-    // Test MetaspaceObj::is_metaspace_object
-    ASSERT_TRUE(MetaspaceObj::is_valid(p));
-
-    // A misaligned object shall not be recognized
-    const MetaspaceObj* p_misaligned = (MetaspaceObj*)((address)p) + 1;
-    ASSERT_FALSE(MetaspaceObj::is_valid(p_misaligned));
-
-    // Test VirtualSpaceList::contains and find_enclosing_space
-    VirtualSpaceList* list = Metaspace::space_list();
-    if (mdType == Metaspace::ClassType && Metaspace::using_class_space()) {
-      list = Metaspace::class_space_list();
-    }
-    ASSERT_TRUE(list->contains(p));
-    VirtualSpaceNode* const n = list->find_enclosing_space(p);
-    ASSERT_TRUE(n != NULL);
-    ASSERT_TRUE(n->contains(p));
-
-    // A misaligned pointer shall be recognized by list::contains
-    ASSERT_TRUE(list->contains((address)p) + 1);
-
-    // Now for some bogus values
-    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)NULL));
-
-    // Should exercise various paths in MetaspaceObj::is_valid()
-    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)1024));
-    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)8192));
-
-    MetaspaceObj* p_stack = (MetaspaceObj*) &_lock;
-    ASSERT_FALSE(MetaspaceObj::is_valid(p_stack));
-
-    MetaspaceObj* p_heap = (MetaspaceObj*) os::malloc(41, mtInternal);
-    ASSERT_FALSE(MetaspaceObj::is_valid(p_heap));
-    os::free(p_heap);
-
-    // Test Metaspace::contains_xxx
-    ASSERT_TRUE(Metaspace::contains(p));
-    ASSERT_TRUE(Metaspace::contains_non_shared(p));
-
-    delete _ms;
-    _ms = NULL;
-    delete _lock;
-    _lock = NULL;
-  }
-
-};
-
-TEST_VM_F(MetaspaceIsMetaspaceObjTest, non_class_space) {
-  do_test(Metaspace::NonClassType);
-}
-
-TEST_VM_F(MetaspaceIsMetaspaceObjTest, class_space) {
-  do_test(Metaspace::ClassType);
-}
-
--- a/test/hotspot/gtest/memory/test_metachunk.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/allocation.hpp"
-#include "memory/metaspace/metachunk.hpp"
-#include "unittest.hpp"
-#include "utilities/align.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/debug.hpp"
-
-using namespace metaspace;
-
-class MetachunkTest {
- public:
-  static MetaWord* initial_top(Metachunk* metachunk) {
-    return metachunk->initial_top();
-  }
-  static MetaWord* top(Metachunk* metachunk) {
-    return metachunk->top();
-  }
-
-};
-
-TEST(Metachunk, basic) {
-  const ChunkIndex chunk_type = MediumIndex;
-  const bool is_class = false;
-  const size_t word_size = get_size_for_nonhumongous_chunktype(chunk_type, is_class);
-  // Allocate the chunk with correct alignment.
-  void* memory = malloc(word_size * BytesPerWord * 2);
-  ASSERT_TRUE(NULL != memory) << "Failed to malloc 2MB";
-
-  void* p_placement = align_up(memory, word_size * BytesPerWord);
-
-  Metachunk* metachunk = ::new (p_placement) Metachunk(chunk_type, is_class, word_size, NULL);
-
-  EXPECT_EQ((MetaWord*) metachunk, metachunk->bottom());
-  EXPECT_EQ((uintptr_t*) metachunk + metachunk->size(), metachunk->end());
-
-  // Check sizes
-  EXPECT_EQ(metachunk->size(), metachunk->word_size());
-  EXPECT_EQ(pointer_delta(metachunk->end(), metachunk->bottom(),
-                          sizeof (MetaWord)),
-            metachunk->word_size());
-
-  // Check usage
-  EXPECT_EQ(metachunk->used_word_size(), metachunk->overhead());
-  EXPECT_EQ(metachunk->word_size() - metachunk->used_word_size(),
-            metachunk->free_word_size());
-  EXPECT_EQ(MetachunkTest::top(metachunk), MetachunkTest::initial_top(metachunk));
-  EXPECT_TRUE(metachunk->is_empty());
-
-  // Allocate
-  size_t alloc_size = 64; // Words
-  EXPECT_TRUE(is_aligned(alloc_size, Metachunk::object_alignment()));
-
-  MetaWord* mem = metachunk->allocate(alloc_size);
-
-  // Check post alloc
-  EXPECT_EQ(MetachunkTest::initial_top(metachunk), mem);
-  EXPECT_EQ(MetachunkTest::top(metachunk), mem + alloc_size);
-  EXPECT_EQ(metachunk->overhead() + alloc_size, metachunk->used_word_size());
-  EXPECT_EQ(metachunk->word_size() - metachunk->used_word_size(),
-            metachunk->free_word_size());
-  EXPECT_FALSE(metachunk->is_empty());
-
-  // Clear chunk
-  metachunk->reset_empty();
-
-  // Check post clear
-  EXPECT_EQ(metachunk->used_word_size(), metachunk->overhead());
-  EXPECT_EQ(metachunk->word_size() - metachunk->used_word_size(),
-            metachunk->free_word_size());
-  EXPECT_EQ(MetachunkTest::top(metachunk), MetachunkTest::initial_top(metachunk));
-  EXPECT_TRUE(metachunk->is_empty());
-
-  free(memory);
-}
--- a/test/hotspot/gtest/memory/test_metaspace.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/metaspace.hpp"
-#include "memory/metaspace/virtualSpaceList.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "unittest.hpp"
-
-using namespace metaspace;
-
-TEST_VM(MetaspaceUtils, reserved) {
-  size_t reserved = MetaspaceUtils::reserved_bytes();
-  EXPECT_GT(reserved, 0UL);
-
-  size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
-  EXPECT_GT(reserved_metadata, 0UL);
-  EXPECT_LE(reserved_metadata, reserved);
-}
-
-TEST_VM(MetaspaceUtils, reserved_compressed_class_pointers) {
-  if (!UseCompressedClassPointers) {
-    return;
-  }
-  size_t reserved = MetaspaceUtils::reserved_bytes();
-  EXPECT_GT(reserved, 0UL);
-
-  size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
-  EXPECT_GT(reserved_class, 0UL);
-  EXPECT_LE(reserved_class, reserved);
-}
-
-TEST_VM(MetaspaceUtils, committed) {
-  size_t committed = MetaspaceUtils::committed_bytes();
-  EXPECT_GT(committed, 0UL);
-
-  size_t reserved  = MetaspaceUtils::reserved_bytes();
-  EXPECT_LE(committed, reserved);
-
-  size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
-  EXPECT_GT(committed_metadata, 0UL);
-  EXPECT_LE(committed_metadata, committed);
-}
-
-TEST_VM(MetaspaceUtils, committed_compressed_class_pointers) {
-  if (!UseCompressedClassPointers) {
-    return;
-  }
-  size_t committed = MetaspaceUtils::committed_bytes();
-  EXPECT_GT(committed, 0UL);
-
-  size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
-  EXPECT_GT(committed_class, 0UL);
-  EXPECT_LE(committed_class, committed);
-}
-
-TEST_VM(MetaspaceUtils, virtual_space_list_large_chunk) {
-  VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
-  // vm_allocation_granularity aligned on Windows.
-  size_t large_size = (size_t)(2*256*K + (os::vm_page_size() / BytesPerWord));
-  large_size += (os::vm_page_size() / BytesPerWord);
-  vs_list->get_new_chunk(large_size, 0);
-}
--- a/test/hotspot/gtest/memory/test_metaspace_allocation.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,273 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/metaspace.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ostream.hpp"
-#include "unittest.hpp"
-
-#define NUM_PARALLEL_METASPACES                 50
-#define MAX_PER_METASPACE_ALLOCATION_WORDSIZE   (512 * K)
-
-//#define DEBUG_VERBOSE true
-
-#ifdef DEBUG_VERBOSE
-
-struct chunkmanager_statistics_t {
-  int num_specialized_chunks;
-  int num_small_chunks;
-  int num_medium_chunks;
-  int num_humongous_chunks;
-};
-
-extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out);
-
-static void print_chunkmanager_statistics(outputStream* st, Metaspace::MetadataType mdType) {
-  chunkmanager_statistics_t stat;
-  test_metaspace_retrieve_chunkmanager_statistics(mdType, &stat);
-  st->print_cr("free chunks: %d / %d / %d / %d", stat.num_specialized_chunks, stat.num_small_chunks,
-               stat.num_medium_chunks, stat.num_humongous_chunks);
-}
-
-#endif
-
-struct chunk_geometry_t {
-  size_t specialized_chunk_word_size;
-  size_t small_chunk_word_size;
-  size_t medium_chunk_word_size;
-};
-
-extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out);
-
-
-class MetaspaceAllocationTest : public ::testing::Test {
-protected:
-
-  struct {
-    size_t allocated;
-    Mutex* lock;
-    ClassLoaderMetaspace* space;
-    bool is_empty() const { return allocated == 0; }
-    bool is_full() const { return allocated >= MAX_PER_METASPACE_ALLOCATION_WORDSIZE; }
-  } _spaces[NUM_PARALLEL_METASPACES];
-
-  chunk_geometry_t _chunk_geometry;
-
-  virtual void SetUp() {
-    ::memset(_spaces, 0, sizeof(_spaces));
-    test_metaspace_retrieve_chunk_geometry(Metaspace::NonClassType, &_chunk_geometry);
-  }
-
-  virtual void TearDown() {
-    for (int i = 0; i < NUM_PARALLEL_METASPACES; i ++) {
-      if (_spaces[i].space != NULL) {
-        delete _spaces[i].space;
-        delete _spaces[i].lock;
-      }
-    }
-  }
-
-  void create_space(int i) {
-    assert(i >= 0 && i < NUM_PARALLEL_METASPACES, "Sanity");
-    assert(_spaces[i].space == NULL && _spaces[i].allocated == 0, "Sanity");
-    if (_spaces[i].lock == NULL) {
-      _spaces[i].lock = new Mutex(Monitor::native, "gtest-MetaspaceAllocationTest-lock", false, Monitor::_safepoint_check_never);
-      ASSERT_TRUE(_spaces[i].lock != NULL);
-    }
-    // Let every ~10th space be an unsafe anonymous one to test different allocation patterns.
-    const Metaspace::MetaspaceType msType = (os::random() % 100 < 10) ?
-      Metaspace::UnsafeAnonymousMetaspaceType : Metaspace::StandardMetaspaceType;
-    {
-      // Pull lock during space creation, since this is what happens in the VM too
-      // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
-      MutexLocker ml(_spaces[i].lock,  Mutex::_no_safepoint_check_flag);
-      _spaces[i].space = new ClassLoaderMetaspace(_spaces[i].lock, msType);
-    }
-    _spaces[i].allocated = 0;
-    ASSERT_TRUE(_spaces[i].space != NULL);
-  }
-
-  // Returns the index of a random space where index is [0..metaspaces) and which is
-  //   empty, non-empty or full.
-  // Returns -1 if no matching space exists.
-  enum fillgrade { fg_empty, fg_non_empty, fg_full };
-  int get_random_matching_space(int metaspaces, fillgrade fg) {
-    const int start_index = os::random() % metaspaces;
-    int i = start_index;
-    do {
-      if (fg == fg_empty && _spaces[i].is_empty()) {
-        return i;
-      } else if ((fg == fg_full && _spaces[i].is_full()) ||
-                 (fg == fg_non_empty && !_spaces[i].is_full() && !_spaces[i].is_empty())) {
-        return i;
-      }
-      i ++;
-      if (i == metaspaces) {
-        i = 0;
-      }
-    } while (i != start_index);
-    return -1;
-  }
-
-  int get_random_emtpy_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_empty); }
-  int get_random_non_emtpy_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_non_empty); }
-  int get_random_full_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_full); }
-
-  void do_test(Metaspace::MetadataType mdType, int metaspaces, int phases, int allocs_per_phase,
-               float probability_for_large_allocations // 0.0-1.0
-  ) {
-    // Alternate between breathing in (allocating n blocks for a random Metaspace) and
-    // breathing out (deleting a random Metaspace). The intent is to stress the coalescation
-    // and splitting of free chunks.
-    int phases_done = 0;
-    bool allocating = true;
-    while (phases_done < phases) {
-      bool force_switch = false;
-      if (allocating) {
-        // Allocate space from metaspace, with a preference for completely empty spaces. This
-        // should provide a good mixture of metaspaces in the virtual space.
-        int index = get_random_emtpy_space(metaspaces);
-        if (index == -1) {
-          index = get_random_non_emtpy_space(metaspaces);
-        }
-        if (index == -1) {
-          // All spaces are full, switch to freeing.
-          force_switch = true;
-        } else {
-          // create space if it does not yet exist.
-          if (_spaces[index].space == NULL) {
-            create_space(index);
-          }
-          // Allocate a bunch of blocks from it. Mostly small stuff but mix in large allocations
-          //  to force humongous chunk allocations.
-          int allocs_done = 0;
-          while (allocs_done < allocs_per_phase && !_spaces[index].is_full()) {
-            size_t size = 0;
-            int r = os::random() % 1000;
-            if ((float)r < probability_for_large_allocations * 1000.0) {
-              size = (os::random() % _chunk_geometry.medium_chunk_word_size) + _chunk_geometry.medium_chunk_word_size;
-            } else {
-              size = os::random() % 64;
-            }
-            // Note: In contrast to space creation, no need to lock here. ClassLoaderMetaspace::allocate() will lock itself.
-            MetaWord* const p = _spaces[index].space->allocate(size, mdType);
-            if (p == NULL) {
-              // We very probably did hit the metaspace "until-gc" limit.
-#ifdef DEBUG_VERBOSE
-              tty->print_cr("OOM for " SIZE_FORMAT " words. ", size);
-#endif
-              // Just switch to deallocation and resume tests.
-              force_switch = true;
-              break;
-            } else {
-              _spaces[index].allocated += size;
-              allocs_done ++;
-            }
-          }
-        }
-      } else {
-        // freeing: find a metaspace and delete it, with preference for completely filled spaces.
-        int index = get_random_full_space(metaspaces);
-        if (index == -1) {
-          index = get_random_non_emtpy_space(metaspaces);
-        }
-        if (index == -1) {
-          force_switch = true;
-        } else {
-          assert(_spaces[index].space != NULL && _spaces[index].allocated > 0, "Sanity");
-          // Note: do not lock here. In the "wild" (the VM), we do not so either (see ~ClassLoaderData()).
-          delete _spaces[index].space;
-          _spaces[index].space = NULL;
-          _spaces[index].allocated = 0;
-        }
-      }
-
-      if (force_switch) {
-        allocating = !allocating;
-      } else {
-        // periodically switch between allocating and freeing, but prefer allocation because
-        // we want to intermingle allocations of multiple metaspaces.
-        allocating = os::random() % 5 < 4;
-      }
-      phases_done ++;
-#ifdef DEBUG_VERBOSE
-      int metaspaces_in_use = 0;
-      size_t total_allocated = 0;
-      for (int i = 0; i < metaspaces; i ++) {
-        if (_spaces[i].allocated > 0) {
-          total_allocated += _spaces[i].allocated;
-          metaspaces_in_use ++;
-        }
-      }
-      tty->print("%u:\tspaces: %d total words: " SIZE_FORMAT "\t\t\t", phases_done, metaspaces_in_use, total_allocated);
-      print_chunkmanager_statistics(tty, mdType);
-#endif
-    }
-#ifdef DEBUG_VERBOSE
-    tty->print_cr("Test finished. ");
-    MetaspaceUtils::print_metaspace_map(tty, mdType);
-    print_chunkmanager_statistics(tty, mdType);
-#endif
-  }
-};
-
-
-
-TEST_F(MetaspaceAllocationTest, chunk_geometry) {
-  ASSERT_GT(_chunk_geometry.specialized_chunk_word_size, (size_t) 0);
-  ASSERT_GT(_chunk_geometry.small_chunk_word_size, _chunk_geometry.specialized_chunk_word_size);
-  ASSERT_EQ(_chunk_geometry.small_chunk_word_size % _chunk_geometry.specialized_chunk_word_size, (size_t)0);
-  ASSERT_GT(_chunk_geometry.medium_chunk_word_size, _chunk_geometry.small_chunk_word_size);
-  ASSERT_EQ(_chunk_geometry.medium_chunk_word_size % _chunk_geometry.small_chunk_word_size, (size_t)0);
-}
-
-
-TEST_VM_F(MetaspaceAllocationTest, single_space_nonclass) {
-  do_test(Metaspace::NonClassType, 1, 1000, 100, 0);
-}
-
-TEST_VM_F(MetaspaceAllocationTest, single_space_class) {
-  do_test(Metaspace::ClassType, 1, 1000, 100, 0);
-}
-
-TEST_VM_F(MetaspaceAllocationTest, multi_space_nonclass) {
-  do_test(Metaspace::NonClassType, NUM_PARALLEL_METASPACES, 100, 1000, 0.0);
-}
-
-TEST_VM_F(MetaspaceAllocationTest, multi_space_class) {
-  do_test(Metaspace::ClassType, NUM_PARALLEL_METASPACES, 100, 1000, 0.0);
-}
-
-TEST_VM_F(MetaspaceAllocationTest, multi_space_nonclass_2) {
-  // many metaspaces, with humongous chunks mixed in.
-  do_test(Metaspace::NonClassType, NUM_PARALLEL_METASPACES, 100, 1000, .006f);
-}
-
--- a/test/hotspot/gtest/memory/test_spaceManager.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "memory/metaspace/spaceManager.hpp"
-
-using metaspace::SpaceManager;
-
-// The test function is only available in debug builds
-#ifdef ASSERT
-
-#include "unittest.hpp"
-
-
-static void test_adjust_initial_chunk_size(bool is_class) {
-  const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
-  const size_t normal   = SpaceManager::small_chunk_size(is_class);
-  const size_t medium   = SpaceManager::medium_chunk_size(is_class);
-
-#define do_test(value, expected, is_class_value)                                 \
-    do {                                                                         \
-      size_t v = value;                                                          \
-      size_t e = expected;                                                       \
-      assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
-             "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
-    } while (0)
-
-    // Smallest (specialized)
-    do_test(1,            smallest, is_class);
-    do_test(smallest - 1, smallest, is_class);
-    do_test(smallest,     smallest, is_class);
-
-    // Small
-    do_test(smallest + 1, normal, is_class);
-    do_test(normal - 1,   normal, is_class);
-    do_test(normal,       normal, is_class);
-
-    // Medium
-    do_test(normal + 1, medium, is_class);
-    do_test(medium - 1, medium, is_class);
-    do_test(medium,     medium, is_class);
-
-    // Humongous
-    do_test(medium + 1, medium + 1, is_class);
-
-#undef test_adjust_initial_chunk_size
-}
-
-TEST(SpaceManager, adjust_initial_chunk_size) {
-  test_adjust_initial_chunk_size(true);
-  test_adjust_initial_chunk_size(false);
-}
-
-#endif // ASSERT
--- a/test/hotspot/gtest/memory/test_virtualspace.cpp	Wed Oct 16 16:54:56 2019 +0200
+++ b/test/hotspot/gtest/memory/test_virtualspace.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -28,6 +28,7 @@
 #include "utilities/align.hpp"
 #include "unittest.hpp"
 
+
 namespace {
   class MemoryReleaser {
     ReservedSpace* const _rs;
@@ -337,3 +338,6 @@
   EXPECT_NO_FATAL_FAILURE(test_virtual_space_actual_committed_space(10 * M, 5 * M,  Commit));
   EXPECT_NO_FATAL_FAILURE(test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit));
 }
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/metaspaceTestsCommon.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+
+#include "metaspaceTestsCommon.hpp"
+
+#include "utilities/globalDefinitions.hpp"
+
+#ifdef _WIN32
+#include <psapi.h>
+#endif
+
+void calc_random_range(size_t outer_range_len, range_t* out, size_t alignment) {
+
+  assert(is_aligned(outer_range_len, alignment), "bad input range");
+  assert(outer_range_len > 0, "no zero range");
+
+  size_t l1 = os::random() % outer_range_len;
+  size_t l2 = os::random() % outer_range_len;
+  if (l1 > l2) {
+    size_t l = l1;
+    l1 = l2;
+    l2 = l;
+  }
+  l1 = align_down(l1, alignment);
+  l2 = align_up(l2, alignment);
+
+  // disallow zero range
+  if (l2 == l1) {
+    if (l1 >= alignment) {
+      l1 -= alignment;
+    } else {
+      assert(l2 <= outer_range_len - alignment, "Sanity");
+      l2 += alignment;
+    }
+  }
+
+  assert(l2 - l1 > 0 && l2 - l1 <= outer_range_len, "Sanity " SIZE_FORMAT "-" SIZE_FORMAT ".", l1, l2);
+  assert(is_aligned(l1, alignment), "Sanity");
+  assert(is_aligned(l2, alignment), "Sanity");
+
+  out->from = l1; out->to = l2;
+
+}
+
+void calc_random_address_range(const address_range_t* outer_range, address_range_t* out, size_t alignment) {
+  range_t r;
+  calc_random_range(outer_range->word_size, &r, alignment);
+
+  out->p = outer_range->p + r.from;
+  out->word_size = r.to - r.from;
+}
+
+size_t get_workingset_size() {
+#if defined(_WIN32)
+  PROCESS_MEMORY_COUNTERS info;
+  GetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
+  return (size_t)info.WorkingSetSize;
+#elif defined(LINUX)
+  long result = 0L;
+  FILE* f = fopen("/proc/self/statm", "r");
+  if (f == NULL) {
+    return 0;
+  }
+  // Second number in statm, in num of pages
+  if (fscanf(f, "%*s%ld", &result) != 1 ) {
+    fclose(f);
+    return 0;
+  }
+  fclose(f);
+  return (size_t)result * (size_t)os::vm_page_size();
+#else
+  return 0L;
+#endif
+}
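+
+// Illustrative usage sketch (not part of the tests): bracket an operation with
+// two samples to estimate its memory footprint. Note that this returns 0 on
+// platforms other than Windows and Linux.
+//
+//   const size_t ws_before = get_workingset_size();
+//   // ... perform allocations ...
+//   const size_t ws_after = get_workingset_size();
+//   const size_t footprint_delta = ws_after - ws_before;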
+
+
+
+void zap_range(MetaWord* p, size_t word_size) {
+  // Touch one word per page in the range.
+  for (MetaWord* pzap = p; pzap < p + word_size; pzap += os::vm_page_size() / BytesPerWord) {
+    *pzap = 0;
+  }
+}
+
+
+
+// Writes a unique pattern to p
+void mark_address(MetaWord* p, uintx pattern) {
+  MetaWord x = (MetaWord)((uintx) p ^ pattern);
+  *p = x;
+}
+
+// Checks the pattern at the given address
+bool check_marked_address(const MetaWord* p, uintx pattern) {
+  MetaWord x = (MetaWord)((uintx) p ^ pattern);
+  EXPECT_EQ(*p, x);
+  return *p == x;
+}
+
+// "fill_range_with_pattern" fills a range of heap words with pointers to itself.
+//
+// The idea is to fill a memory range with a pattern which is both marked clearly to the caller
+// and cannot be moved without becoming invalid.
+//
+// The filled range can be checked with check_range_for_pattern. A sub-range of the
+// original range can be checked as well.
+void fill_range_with_pattern(MetaWord* p, uintx pattern, size_t word_size) {
+  assert(word_size > 0 && p != NULL, "sanity");
+  for (MetaWord* p2 = p; p2 < p + word_size; p2 ++) {
+    mark_address(p2, pattern);
+  }
+}
+
+bool check_range_for_pattern(const MetaWord* p, uintx pattern, size_t word_size) {
+  assert(word_size > 0 && p != NULL, "sanity");
+  const MetaWord* p2 = p;
+  while (p2 < p + word_size && check_marked_address(p2, pattern)) {
+    p2 ++;
+  }
+  return p2 == p + word_size;
+}
+
+
+// Similar to fill_range_with_pattern, but only marks the first and last word. This is intended
+// for cases where fill_range_with_pattern would be too slow.
+// Use check_marked_range to check the range. In contrast to check_range_for_pattern, only the original
+// range can be checked.
+void mark_range(MetaWord* p, uintx pattern, size_t word_size) {
+  assert(word_size > 0 && p != NULL, "sanity");
+  mark_address(p, pattern);
+  mark_address(p + word_size - 1, pattern);
+}
+
+bool check_marked_range(const MetaWord* p, uintx pattern, size_t word_size) {
+  assert(word_size > 0 && p != NULL, "sanity");
+  return check_marked_address(p, pattern) && check_marked_address(p + word_size - 1, pattern);
+}
+
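+// A minimal usage sketch for the pattern helpers above (illustrative only; the
+// buffer size and pattern values are made up for this example):
+//
+//   MetaWord buf[128];
+//   fill_range_with_pattern(buf, (uintx)0xCAFE, 128);
+//   check_range_for_pattern(buf, (uintx)0xCAFE, 64);  // sub-ranges can be checked too
+//   mark_range(buf, (uintx)0xBABE, 128);
+//   check_marked_range(buf, (uintx)0xBABE, 128);      // checks first and last word only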
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/metaspaceTestsCommon.hpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef GTEST_METASPACE_METASPACETESTCOMMON_HPP
+#define GTEST_METASPACE_METASPACETESTCOMMON_HPP
+
+#include "memory/allocation.hpp"
+
+#include "memory/metaspace/chunkHeaderPool.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "memory/metaspace/chunkManager.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspace/metachunk.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
+#include "memory/metaspace/spaceManager.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#include "unittest.hpp"
+
+#include <stdio.h>
+
+
+//////////////////////////////////////////////////////////
+// handy aliases
+
+using metaspace::ChunkAllocSequence;
+using metaspace::ChunkHeaderPool;
+using metaspace::ChunkManager;
+using metaspace::CommitLimiter;
+using metaspace::CommitMask;
+using metaspace::SizeCounter;
+using metaspace::SizeAtomicCounter;
+using metaspace::IntCounter;
+using metaspace::Metachunk;
+using metaspace::MetachunkList;
+using metaspace::MetachunkListCluster;
+using metaspace::Settings;
+using metaspace::sm_stats_t;
+using metaspace::in_use_chunk_stats_t;
+using metaspace::cm_stats_t;
+using metaspace::SpaceManager;
+using metaspace::VirtualSpaceList;
+using metaspace::VirtualSpaceNode;
+
+using metaspace::chklvl_t;
+using metaspace::chklvl::HIGHEST_CHUNK_LEVEL;
+using metaspace::chklvl::MAX_CHUNK_WORD_SIZE;
+using metaspace::chklvl::MAX_CHUNK_BYTE_SIZE;
+using metaspace::chklvl::LOWEST_CHUNK_LEVEL;
+using metaspace::chklvl::NUM_CHUNK_LEVELS;
+
+
+/////////////////////////////////////////////////////////////////////
+// A little mockup to mimic and test the CommitMask in various tests
+
+class TestMap {
+  const size_t _len;
+  char* _arr;
+public:
+  TestMap(size_t len) : _len(len), _arr(NULL) {
+    _arr = NEW_C_HEAP_ARRAY(char, len, mtInternal);
+    memset(_arr, 0, _len);
+  }
+  ~TestMap() { FREE_C_HEAP_ARRAY(char, _arr); }
+
+  int get_num_set(size_t from, size_t to) const {
+    int result = 0;
+    for(size_t i = from; i < to; i ++) {
+      if (_arr[i] > 0) {
+        result ++;
+      }
+    }
+    return result;
+  }
+
+  size_t get_num_set() const { return get_num_set(0, _len); }
+
+  void set_range(size_t from, size_t to) {
+    memset(_arr + from, 1, to - from);
+  }
+
+  void clear_range(size_t from, size_t to) {
+    memset(_arr + from, 0, to - from);
+  }
+
+};
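+
+// Illustrative sketch: a TestMap is typically sized like the CommitMask under
+// test and updated in lockstep with it, so that both can be compared afterwards
+// (see the commit mask tests):
+//
+//   TestMap map(word_size);
+//   map.set_range(from, to);  // mirrors mask.mark_range_as_committed(...)
+//   // ... later, compare map.get_num_set() against mask.get_committed_size()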
+
+
+
+///////////////////////////////////////////////////////////////
+// Functions to calculate random ranges in outer ranges
+
+// Note: [ from..to )
+struct range_t {
+  size_t from; size_t to;
+};
+
+void calc_random_range(size_t outer_range_len, range_t* out, size_t alignment);
+
+// Note:  [ p..p+word_size )
+struct address_range_t {
+  MetaWord* p; size_t word_size;
+};
+
+void calc_random_address_range(const address_range_t* outer_range, address_range_t* out, size_t alignment);
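+
+// Illustrative example (values made up): pick a random sub-range, aligned to
+// 64 words, out of a 1024-word outer range:
+//
+//   range_t r;
+//   calc_random_range(1024, &r, 64);
+//   // now 0 <= r.from < r.to <= 1024, with both bounds aligned to 64 words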
+
+
+///////////////////////////////////////////////////////////
+// Helper class for generating random allocation sizes
+class RandSizeGenerator {
+  const size_t _min; // [
+  const size_t _max; // )
+  const float _outlier_chance; // 0.0 -- 1.0
+  const size_t _outlier_min; // [
+  const size_t _outlier_max; // )
+public:
+  RandSizeGenerator(size_t min, size_t max)
+    : _min(min), _max(max), _outlier_chance(0.0), _outlier_min(min), _outlier_max(max)
+  {}
+
+  RandSizeGenerator(size_t min, size_t max, float outlier_chance, size_t outlier_min, size_t outlier_max)
+    : _min(min), _max(max), _outlier_chance(outlier_chance), _outlier_min(outlier_min), _outlier_max(outlier_max)
+  {}
+
+  size_t get() const {
+    size_t l1 = _min;
+    size_t l2 = _max;
+    int r = os::random() % 1000;
+    if ((float)r < _outlier_chance * 1000.0) {
+      l1 = _outlier_min;
+      l2 = _outlier_max;
+    }
+    const size_t d = l2 - l1;
+    return l1 + (os::random() % d);
+  }
+
+}; // end RandSizeGenerator
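+
+// Illustrative example (the same parameters are used in test_metachunk.cpp):
+// mostly small sizes of 1..255 words, with a 10% chance of an outlier of
+// 1024..4095 words:
+//
+//   RandSizeGenerator rgen(1, 256, 0.1f, 1024, 4096);
+//   const size_t word_size = rgen.get();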
+
+
+///////////////////////////////////////////////////////////
+// Function to test-access a memory range
+
+void zap_range(MetaWord* p, size_t word_size);
+
+// "fill_range_with_pattern" fills a range of heap words with pointers to itself.
+//
+// The idea is to fill a memory range with a pattern which is both marked clearly to the caller
+// and cannot be moved without becoming invalid.
+//
+// The filled range can be checked with check_range_for_pattern. A sub-range of the
+// original range can be checked as well.
+void fill_range_with_pattern(MetaWord* p, uintx pattern, size_t word_size);
+bool check_range_for_pattern(const MetaWord* p, uintx pattern, size_t word_size);
+
+// Writes a unique pattern to p
+void mark_address(MetaWord* p, uintx pattern);
+// Checks the pattern at the given address
+bool check_marked_address(const MetaWord* p, uintx pattern);
+
+// Similar to fill_range_with_pattern, but only marks the first and last word. This is intended
+// for cases where fill_range_with_pattern would be too slow.
+// Use check_marked_range to check the range. In contrast to check_range_for_pattern, only the original
+// range can be checked.
+void mark_range(MetaWord* p, uintx pattern, size_t word_size);
+bool check_marked_range(const MetaWord* p, uintx pattern, size_t word_size);
+
+//////////////////////////////////////////////////////////
+// Some helpers to avoid typing out those annoying casts for NULL
+
+#define ASSERT_NOT_NULL(ptr)      ASSERT_NE((void*)NULL, (void*)ptr)
+#define ASSERT_NULL(ptr)          ASSERT_EQ((void*)NULL, (void*)ptr)
+#define EXPECT_NOT_NULL(ptr)      EXPECT_NE((void*)NULL, (void*)ptr)
+#define EXPECT_NULL(ptr)          EXPECT_EQ((void*)NULL, (void*)ptr)
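+
+// Usage example: ASSERT_NOT_NULL(c) instead of ASSERT_NE((void*)NULL, (void*)c).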
+
+
+//////////////////////////////////////////////////////////
+// logging
+
+// Define "LOG_PLEASE" to switch on logging for a particular test before inclusion of this header.
+#ifdef LOG_PLEASE
+  #define LOG(...) { printf(__VA_ARGS__); printf("\n"); }
+#else
+  #define LOG(...)
+#endif
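+
+// Example: to enable verbose logging for a single test file, define LOG_PLEASE
+// before including this header:
+//
+//   #define LOG_PLEASE
+//   #include "metaspaceTestsCommon.hpp"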
+
+//////////////////////////////////////////////////////////
+// Helper
+
+size_t get_workingset_size();
+
+
+#endif // GTEST_METASPACE_METASPACETESTCOMMON_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_chunkManager.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+#include "metaspace/metaspaceTestsCommon.hpp"
+
+class ChunkManagerTest {
+
+  static const int max_num_chunks = 0x100;
+
+  // The commit limiter is updated by the underlying virtual space nodes.
+  CommitLimiter _commit_limiter;
+  VirtualSpaceList* _vs_list;
+
+  ChunkManager* _cm;
+
+  Metachunk* _elems[max_num_chunks];
+  SizeCounter _word_size_allocated;
+  IntCounter _num_allocated;
+
+  // Note: [min_level ... max_level] (inclusive max)
+  static chklvl_t get_random_level(chklvl_t min_level, chklvl_t max_level) {
+    int range = max_level - min_level + 1; // [ ]
+    chklvl_t l = min_level + (os::random() % range);
+    return l;
+  }
+
+  struct chklvl_range_t { chklvl_t from; chklvl_t to; };
+
+  // Note: [min_level ... max_level] (inclusive max)
+  static void get_random_level_range(chklvl_t min_level, chklvl_t max_level, chklvl_range_t* out) {
+    chklvl_t l1 = get_random_level(min_level, max_level);
+    chklvl_t l2 = get_random_level(min_level, max_level);
+    if (l1 > l2) {
+      chklvl_t l = l1;
+      l1 = l2;
+      l2 = l;
+    }
+    out->from = l1; out->to = l2;
+  }
+
+  bool attempt_free_at(size_t index) {
+
+    LOG("attempt_free_at " SIZE_FORMAT ".", index);
+
+    if (_elems[index] == NULL) {
+      return false;
+    }
+
+    Metachunk* c = _elems[index];
+    const size_t chunk_word_size = c->word_size();
+    _cm->return_chunk(c);
+
+    _elems[index] = NULL;
+
+    DEBUG_ONLY(_vs_list->verify(true);)
+    DEBUG_ONLY(_cm->verify(true);)
+
+    _word_size_allocated.decrement_by(chunk_word_size);
+    _num_allocated.decrement();
+
+    return true;
+  }
+
+  bool attempt_allocate_at(size_t index, chklvl_t max_level, chklvl_t pref_level, bool fully_commit = false) {
+
+    LOG("attempt_allocate_at " SIZE_FORMAT ". (" CHKLVL_FORMAT "-" CHKLVL_FORMAT ")", index, max_level, pref_level);
+
+    if (_elems[index] != NULL) {
+      return false;
+    }
+
+    Metachunk* c = _cm->get_chunk(max_level, pref_level);
+
+    EXPECT_NOT_NULL(c);
+    EXPECT_TRUE(c->is_in_use());
+    EXPECT_LE(c->level(), max_level);
+    EXPECT_GE(c->level(), pref_level);
+
+    _elems[index] = c;
+
+    DEBUG_ONLY(c->verify(true);)
+    DEBUG_ONLY(_vs_list->verify(true);)
+    DEBUG_ONLY(_cm->verify(true);)
+
+    _word_size_allocated.increment_by(c->word_size());
+    _num_allocated.increment();
+
+    if (fully_commit) {
+      c->ensure_fully_committed();
+    }
+
+    return true;
+  }
+
+  // Note: [min_level ... max_level] (inclusive max)
+  void allocate_n_random_chunks(int n, chklvl_t min_level, chklvl_t max_level) {
+
+    assert(n <= max_num_chunks, "Sanity");
+
+    for (int i = 0; i < n; i ++) {
+      chklvl_range_t r;
+      get_random_level_range(min_level, max_level, &r);
+      attempt_allocate_at(i, r.to, r.from);
+    }
+
+  }
+
+  void free_all_chunks() {
+    for (int i = 0; i < max_num_chunks; i ++) {
+      attempt_free_at(i);
+    }
+    assert(_num_allocated.get() == 0, "Sanity");
+    assert(_word_size_allocated.get() == 0, "Sanity");
+  }
+
+  void random_alloc_free(int iterations, chklvl_t min_level, chklvl_t max_level) {
+    for (int i = 0; i < iterations; i ++) {
+      int index = os::random() % max_num_chunks;
+      if ((os::random() % 100) > 50) {
+        attempt_allocate_at(index, max_level, min_level);
+      } else {
+        attempt_free_at(index);
+      }
+    }
+  }
+
+public:
+
+  ChunkManagerTest()
+    : _commit_limiter(50 * M), _vs_list(NULL), _cm(NULL), _word_size_allocated()
+  {
+    _vs_list = new VirtualSpaceList("test_vs_lust", &_commit_limiter);
+    _cm = new ChunkManager("test_cm", _vs_list);
+    memset(_elems, 0, sizeof(_elems));
+  }
+
+  void test(int iterations, chklvl_t min_level, chklvl_t max_level) {
+    for (int run = 0; run < iterations; run ++) {
+      allocate_n_random_chunks(max_num_chunks, min_level, max_level);
+      random_alloc_free(iterations, min_level, max_level);
+      free_all_chunks();
+    }
+  }
+
+  void test_enlarge_chunk() {
+    // Starting from an empty chunk manager, request a chunk of the smallest possible size; then,
+    // attempt to enlarge it in place. Since all splinters should be free, this should work until
+    // we are back at root chunk size.
+    assert(_cm->total_num_chunks() == 0, "Call this on an empty cm.");
+    Metachunk* c = _cm->get_chunk(HIGHEST_CHUNK_LEVEL, HIGHEST_CHUNK_LEVEL);
+    ASSERT_NOT_NULL(c);
+    ASSERT_EQ(c->level(), HIGHEST_CHUNK_LEVEL);
+
+    int num_splinter_chunks = _cm->total_num_chunks();
+
+    // Getting a chunk of the smallest size there is should have yielded us one splinter for every level beyond 0.
+    ASSERT_EQ(num_splinter_chunks, NUM_CHUNK_LEVELS - 1);
+
+    // Now enlarge n-1 times until c is of root chunk level size again.
+    for (chklvl_t l = HIGHEST_CHUNK_LEVEL; l > LOWEST_CHUNK_LEVEL; l --) {
+      bool rc = _cm->attempt_enlarge_chunk(c);
+      ASSERT_TRUE(rc);
+      ASSERT_EQ(c->level(), l - 1);
+      num_splinter_chunks --;
+      ASSERT_EQ(num_splinter_chunks, _cm->total_num_chunks());
+    }
+  }
+
+  void test_recommit_chunk() {
+
+    // test that if a chunk is committed again, already committed content stays.
+    assert(_cm->total_num_chunks() == 0, "Call this on an empty cm.");
+    const chklvl_t lvl = metaspace::chklvl::level_fitting_word_size(Settings::commit_granule_words());
+    Metachunk* c = _cm->get_chunk(lvl, lvl);
+    ASSERT_NOT_NULL(c);
+    ASSERT_EQ(c->level(), lvl);
+
+    // clean slate.
+    c->set_free();
+    c->uncommit();
+    c->set_in_use();
+
+    c->ensure_committed(10);
+
+    const size_t committed_words_1 = c->committed_words();
+    fill_range_with_pattern(c->base(), (uintx) this, committed_words_1);
+
+    // Enlarge the chunk several times, then recommit the whole chunk. The previously
+    // committed content should be preserved.
+    bool rc = _cm->attempt_enlarge_chunk(c);
+    ASSERT_TRUE(rc);
+    rc = _cm->attempt_enlarge_chunk(c);
+    ASSERT_TRUE(rc);
+    rc = _cm->attempt_enlarge_chunk(c);
+    ASSERT_TRUE(rc);
+    ASSERT_EQ(c->level(), lvl - 3);
+
+    c->ensure_committed(c->word_size());
+    check_range_for_pattern(c->base(), (uintx) this, committed_words_1);
+
+  }
+
+  void test_wholesale_reclaim() {
+
+    // Test that wholesale_reclaim() reclaims unused space after chunks have been returned.
+    assert(_num_allocated.get() == 0, "Call this on an empty cm.");
+
+    // Get a number of random sized but large chunks, be sure to cover multiple vsnodes.
+    // Also, commit those chunks.
+    const size_t min_words_to_allocate = 4 * Settings::virtual_space_node_default_word_size(); // about 16 m
+
+    while (_num_allocated.get() < max_num_chunks &&
+           _word_size_allocated.get() < min_words_to_allocate) {
+      const chklvl_t lvl = LOWEST_CHUNK_LEVEL + os::random() % 4;
+      attempt_allocate_at(_num_allocated.get(), lvl, lvl, true); // < fully committed please
+    }
+
+//_cm->print_on(tty);
+//_vs_list->print_on(tty);
+    DEBUG_ONLY(_cm->verify(true);)
+    DEBUG_ONLY(_vs_list->verify(true);)
+
+    // Return about three quarter of the chunks.
+    for (int i = 0; i < max_num_chunks; i ++) {
+      if (os::random() % 100 < 75) {
+        attempt_free_at(i);
+      }
+    }
+
+    // Now do a reclaim.
+    _cm->wholesale_reclaim();
+
+//_cm->print_on(tty);
+//_vs_list->print_on(tty);
+    DEBUG_ONLY(_cm->verify(true);)
+    DEBUG_ONLY(_vs_list->verify(true);)
+
+    // Return all chunks
+    free_all_chunks();
+
+    // Now do a second reclaim.
+    _cm->wholesale_reclaim();
+
+//_cm->print_on(tty);
+//_vs_list->print_on(tty);
+    DEBUG_ONLY(_cm->verify(true);)
+    DEBUG_ONLY(_vs_list->verify(true);)
+
+    // All space should be gone now, if the settings are not preventing reclaim
+    if (Settings::delete_nodes_on_purge()) {
+      ASSERT_EQ(_vs_list->reserved_words(), (size_t)0);
+    }
+    if (Settings::uncommit_on_purge() || Settings::delete_nodes_on_purge()) {
+      ASSERT_EQ(_vs_list->committed_words(), (size_t)0);
+    }
+
+  }
+
+};
+
+// Note: we unfortunately need TEST_VM even though the tested system should be
+// fairly independent, since we need things like os::vm_page_size(), which in
+// turn require OS layer initialization.
+TEST_VM(metaspace, chunkmanager_test_whole_range) {
+  ChunkManagerTest ct;
+  ct.test(100, LOWEST_CHUNK_LEVEL, HIGHEST_CHUNK_LEVEL);
+}
+
+TEST_VM(metaspace, chunkmanager_test_small_chunks) {
+  ChunkManagerTest ct;
+  ct.test(100, HIGHEST_CHUNK_LEVEL / 2, HIGHEST_CHUNK_LEVEL);
+}
+
+TEST_VM(metaspace, chunkmanager_test_large_chunks) {
+  ChunkManagerTest ct;
+  ct.test(100, LOWEST_CHUNK_LEVEL, HIGHEST_CHUNK_LEVEL / 2);
+}
+
+TEST_VM(metaspace, chunkmanager_test_enlarge_chunk) {
+  ChunkManagerTest ct;
+  ct.test_enlarge_chunk();
+}
+
+TEST_VM(metaspace, chunkmanager_test_recommit_chunk) {
+  ChunkManagerTest ct;
+  ct.test_recommit_chunk();
+}
+
+TEST_VM(metaspace, chunkmanager_test_wholesale_reclaim) {
+  ChunkManagerTest ct;
+  ct.test_wholesale_reclaim();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_chunkheaderpool.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+
+#include "metaspaceTestsCommon.hpp"
+
+class ChunkHeaderPoolTest {
+
+  static const size_t max_cap = 0x1000;
+
+  ChunkHeaderPool _pool;
+
+  // Array of the same size as the pool max capacity; holds the allocated elements.
+  Metachunk* _elems[max_cap];
+  SizeCounter _num_allocated;
+
+  void attempt_free_at(size_t index) {
+
+    LOG("attempt_free_at " SIZE_FORMAT ".", index);
+
+    if (_elems[index] == NULL) {
+      return;
+    }
+
+    _pool.return_chunk_header(_elems[index]);
+    _elems[index] = NULL;
+
+    _num_allocated.decrement();
+    DEBUG_ONLY(_num_allocated.check(_pool.used());)
+
+    DEBUG_ONLY(_pool.verify(true);)
+
+  }
+
+  void attempt_allocate_at(size_t index) {
+
+    LOG("attempt_allocate_at " SIZE_FORMAT ".", index);
+
+    if (_elems[index] != NULL) {
+      return;
+    }
+
+    Metachunk* c = _pool.allocate_chunk_header();
+    EXPECT_NOT_NULL(c);
+    _elems[index] = c;
+    c->set_free();
+
+    _num_allocated.increment();
+    DEBUG_ONLY(_num_allocated.check(_pool.used());)
+
+    DEBUG_ONLY(_pool.verify(true);)
+  }
+
+  void attempt_allocate_or_free_at(size_t index) {
+    if (_elems[index] == NULL) {
+      attempt_allocate_at(index);
+    } else {
+      attempt_free_at(index);
+    }
+  }
+
+  // Randomly allocate chunk headers from the pool and return them again.
+  void test_random_alloc_free(int num_iterations) {
+
+    for (int iter = 0; iter < num_iterations; iter ++) {
+      size_t index = (size_t)os::random() % max_cap;
+      attempt_allocate_or_free_at(index);
+    }
+
+    DEBUG_ONLY(_pool.verify(true);)
+
+  }
+
+  static void test_once() {
+    ChunkHeaderPoolTest test;
+    test.test_random_alloc_free(100);
+  }
+
+
+public:
+
+  ChunkHeaderPoolTest() : _pool(true) {
+    memset(_elems, 0, sizeof(_elems));
+  }
+
+  static void run_tests() {
+    for (int i = 0; i < 1000; i ++) {
+      test_once();
+    }
+  }
+
+};
+
+TEST_VM(metaspace, chunk_header_pool) {
+  ChunkHeaderPoolTest::run_tests();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_commitmask.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+#include "runtime/os.hpp"
+
+#include "metaspaceTestsCommon.hpp"
+
+static int get_random(int limit) { return os::random() % limit; }
+
+class CommitMaskTest {
+  const MetaWord* const _base;
+  const size_t _word_size;
+
+  CommitMask _mask;
+
+  void verify_mask() {
+    // Note: we omit the touch test since we operate on fictional
+    // memory
+    DEBUG_ONLY(_mask.verify(true, false);)
+  }
+
+  // Return a random sub range within [_base.._base + word_size),
+  // aligned to granule size
+  const MetaWord* calc_random_subrange(size_t* p_word_size) {
+    size_t l1 = get_random((int)_word_size);
+    size_t l2 = get_random((int)_word_size);
+    if (l1 > l2) {
+      size_t l = l1;
+      l1 = l2;
+      l2 = l;
+    }
+    l1 = align_down(l1, Settings::commit_granule_words());
+    l2 = align_up(l2, Settings::commit_granule_words());
+
+    const MetaWord* p = _base + l1;
+    const size_t len = l2 - l1;
+
+    assert(p >= _base && p + len <= _base + _word_size,
+           "Sanity");
+    *p_word_size = len;
+
+    return p;
+  }
+
+  void test1() {
+
+    LOG("test1");
+
+    // Commit everything
+    size_t prior_committed = _mask.mark_range_as_committed(_base, _word_size);
+    verify_mask();
+    ASSERT_LE(prior_committed, _word_size); // We do not really know
+
+    // Commit everything again, should be a noop
+    prior_committed = _mask.mark_range_as_committed(_base, _word_size);
+    verify_mask();
+    ASSERT_EQ(prior_committed, _word_size);
+
+    ASSERT_EQ(_mask.get_committed_size(),
+              _word_size);
+    ASSERT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+              _word_size);
+
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      ASSERT_TRUE(_mask.is_committed_address(p));
+    }
+
+    // Now make an uncommitted hole
+    size_t sr_word_size;
+    const MetaWord* sr_base = calc_random_subrange(&sr_word_size);
+    LOG("subrange " PTR_FORMAT "-" PTR_FORMAT ".",
+        p2i(sr_base), p2i(sr_base + sr_word_size));
+
+    size_t prior_uncommitted =
+        _mask.mark_range_as_uncommitted(sr_base, sr_word_size);
+    verify_mask();
+    ASSERT_EQ(prior_uncommitted, (size_t)0);
+
+    // Again, for fun, should be a noop now.
+    prior_uncommitted = _mask.mark_range_as_uncommitted(sr_base, sr_word_size);
+    verify_mask();
+    ASSERT_EQ(prior_uncommitted, sr_word_size);
+
+    ASSERT_EQ(_mask.get_committed_size_in_range(sr_base, sr_word_size),
+              (size_t)0);
+    ASSERT_EQ(_mask.get_committed_size(),
+              _word_size - sr_word_size);
+    ASSERT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+              _word_size - sr_word_size);
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      if (p >= sr_base && p < sr_base + sr_word_size) {
+        ASSERT_FALSE(_mask.is_committed_address(p));
+      } else {
+        ASSERT_TRUE(_mask.is_committed_address(p));
+      }
+    }
+
+    // Recommit whole range
+    prior_committed = _mask.mark_range_as_committed(_base, _word_size);
+    verify_mask();
+    ASSERT_EQ(prior_committed, _word_size - sr_word_size);
+
+    ASSERT_EQ(_mask.get_committed_size_in_range(sr_base, sr_word_size),
+              sr_word_size);
+    ASSERT_EQ(_mask.get_committed_size(),
+              _word_size);
+    ASSERT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+              _word_size);
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      ASSERT_TRUE(_mask.is_committed_address(p));
+    }
+
+  }
+
+  void test2() {
+
+    LOG("test2");
+
+    // Uncommit everything
+    size_t prior_uncommitted = _mask.mark_range_as_uncommitted(_base, _word_size);
+    verify_mask();
+    ASSERT_LE(prior_uncommitted, _word_size);
+
+    // Uncommit everything again, should be a noop
+    prior_uncommitted = _mask.mark_range_as_uncommitted(_base, _word_size);
+    verify_mask();
+    ASSERT_EQ(prior_uncommitted, _word_size);
+
+    ASSERT_EQ(_mask.get_committed_size(),
+        (size_t)0);
+    ASSERT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+        (size_t)0);
+
+    // Now make a committed region
+    size_t sr_word_size;
+    const MetaWord* sr_base = calc_random_subrange(&sr_word_size);
+    LOG("subrange " PTR_FORMAT "-" PTR_FORMAT ".",
+        p2i(sr_base), p2i(sr_base + sr_word_size));
+
+    ASSERT_EQ(_mask.get_committed_size_in_range(sr_base, sr_word_size),
+              (size_t)0);
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      ASSERT_FALSE(_mask.is_committed_address(p));
+    }
+
+    size_t prior_committed = _mask.mark_range_as_committed(sr_base, sr_word_size);
+    verify_mask();
+    ASSERT_EQ(prior_committed, (size_t)0);
+
+    // Again, for fun, should be a noop now.
+    prior_committed = _mask.mark_range_as_committed(sr_base, sr_word_size);
+    verify_mask();
+    ASSERT_EQ(prior_committed, sr_word_size);
+
+    ASSERT_EQ(_mask.get_committed_size_in_range(sr_base, sr_word_size),
+        sr_word_size);
+    ASSERT_EQ(_mask.get_committed_size(),
+        sr_word_size);
+    ASSERT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+        sr_word_size);
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      if (p >= sr_base && p < sr_base + sr_word_size) {
+        ASSERT_TRUE(_mask.is_committed_address(p));
+      } else {
+        ASSERT_FALSE(_mask.is_committed_address(p));
+      }
+    }
+
+    // Re-uncommit whole range
+    prior_uncommitted = _mask.mark_range_as_uncommitted(_base, _word_size);
+    verify_mask();
+    ASSERT_EQ(prior_uncommitted, _word_size - sr_word_size);
+
+    EXPECT_EQ(_mask.get_committed_size_in_range(sr_base, sr_word_size),
+        (size_t)0);
+    EXPECT_EQ(_mask.get_committed_size(),
+        (size_t)0);
+    EXPECT_EQ(_mask.get_committed_size_in_range(_base, _word_size),
+        (size_t)0);
+    for (const MetaWord* p = _base; p < _base + _word_size; p ++) {
+      ASSERT_FALSE(_mask.is_committed_address(p));
+    }
+
+  }
+
+
+  void test3() {
+
+    // arbitrary ranges are set and cleared and compared with the test map
+    TestMap map(_word_size);
+
+    _mask.clear_large();
+
+    for (int run = 0; run < 100; run ++) {
+
+      range_t r;
+      calc_random_range(_word_size, &r, Settings::commit_granule_words());
+
+      ASSERT_EQ(_mask.get_committed_size(), (size_t)map.get_num_set());
+      ASSERT_EQ(_mask.get_committed_size_in_range(_base + r.from, r.to - r.from),
+                (size_t)map.get_num_set(r.from, r.to));
+
+      if (os::random() % 100 < 50) {
+        _mask.mark_range_as_committed(_base + r.from, r.to - r.from);
+        map.set_range(r.from, r.to);
+      } else {
+        _mask.mark_range_as_uncommitted(_base + r.from, r.to - r.from);
+        map.clear_range(r.from, r.to);
+      }
+
+      ASSERT_EQ(_mask.get_committed_size(), (size_t)map.get_num_set());
+      ASSERT_EQ(_mask.get_committed_size_in_range(_base + r.from, r.to - r.from),
+                (size_t)map.get_num_set(r.from, r.to));
+
+    }
+
+  }
+
+
+public:
+
+  CommitMaskTest(const MetaWord* base, size_t size)
+    : _base(base), _word_size(size), _mask(base, size)
+  {}
+
+  void test() {
+    LOG("mask range: " PTR_FORMAT "-" PTR_FORMAT
+         " (" SIZE_FORMAT " words).",
+         p2i(_base), p2i(_base + _word_size), _word_size);
+    for (int i = 0; i < 5; i ++) {
+      test1(); test2(); test3();
+    }
+  }
+
+
+};
+
+TEST(metaspace, commit_mask_basics) {
+
+  const MetaWord* const base = (const MetaWord*) 0x100000;
+
+  CommitMask mask1(base, Settings::commit_granule_words());
+  ASSERT_EQ(mask1.size(), (BitMap::idx_t)1);
+
+  CommitMask mask2(base, Settings::commit_granule_words() * 4);
+  ASSERT_EQ(mask2.size(), (BitMap::idx_t)4);
+
+  CommitMask mask3(base, Settings::commit_granule_words() * 43);
+  ASSERT_EQ(mask3.size(), (BitMap::idx_t)43);
+
+  mask3.mark_range_as_committed(base, Settings::commit_granule_words());
+  mask3.mark_range_as_committed(base + (Settings::commit_granule_words() * 42), Settings::commit_granule_words());
+
+  ASSERT_EQ(mask3.at(0), 1);
+  for (int i = 1; i < 42; i ++) {
+    ASSERT_EQ(mask3.at(i), 0);
+  }
+  ASSERT_EQ(mask3.at(42), 1);
+
+}
+
+TEST(metaspace, commit_mask_small) {
+
+  const MetaWord* const base = (const MetaWord*) 0x100000;
+
+  CommitMaskTest test(base, Settings::commit_granule_words());
+  test.test();
+
+}
+
+TEST(metaspace, commit_mask_range) {
+
+  const MetaWord* const base = (const MetaWord*) 0x100000;
+  const size_t len = Settings::commit_granule_words() * 4;
+  const MetaWord* const end = base + len;
+  CommitMask mask(base, len);
+
+  LOG("total range: " PTR_FORMAT "-" PTR_FORMAT "\n", p2i(base), p2i(end));
+
+  size_t l = mask.mark_range_as_committed(base, len);
+  ASSERT_LE(l, len);
+
+  for (const MetaWord* p = base; p <= end - Settings::commit_granule_words();
+       p += Settings::commit_granule_words()) {
+    for (const MetaWord* p2 = p + Settings::commit_granule_words();
+         p2 <= end; p2 += Settings::commit_granule_words()) {
+      LOG(PTR_FORMAT "-" PTR_FORMAT "\n", p2i(p), p2i(p2));
+      EXPECT_EQ(mask.get_committed_size_in_range(p, p2 - p),
+                (size_t)(p2 - p));
+    }
+  }
+
+  l = mask.mark_range_as_uncommitted(base, len);
+  ASSERT_EQ(l, (size_t)0);
+
+  for (const MetaWord* p = base; p <= end - Settings::commit_granule_words();
+       p += Settings::commit_granule_words()) {
+    for (const MetaWord* p2 = p + Settings::commit_granule_words();
+         p2 <= end; p2 += Settings::commit_granule_words()) {
+      LOG(PTR_FORMAT "-" PTR_FORMAT "\n", p2i(p), p2i(p2));
+      EXPECT_EQ(mask.get_committed_size_in_range(p, p2 - p),
+                (size_t)(0));
+    }
+  }
+
+}
+
+
+TEST(metaspace, commit_mask_random) {
+
+  for (int i = 0; i < 5; i ++) {
+
+    // make up a range out of thin air
+    const MetaWord* const base =
+        align_down( (const MetaWord*) ((uintptr_t) os::random() * os::random()),
+                    Settings::commit_granule_bytes());
+    const size_t len = align_up( 1 + (os::random() % M),
+                    Settings::commit_granule_words());
+
+    CommitMaskTest test(base, len);
+    test.test();
+
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_is_metaspace_obj.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspace/classLoaderMetaspace.hpp"
+#include "memory/metaspace/virtualSpaceList.hpp"
+#include "memory/metaspace.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "unittest.hpp"
+
+using namespace metaspace;
+
+
+
+// Test the cheerful multitude of metaspace-contains-functions.
+class MetaspaceIsMetaspaceObjTest {
+  Mutex* _lock;
+  ClassLoaderMetaspace* _ms;
+
+public:
+
+  MetaspaceIsMetaspaceObjTest() : _lock(NULL), _ms(NULL) {}
+  ~MetaspaceIsMetaspaceObjTest() {
+    delete _ms;
+    delete _lock;
+  }
+
+  void do_test(MetadataType mdType) {
+    _lock = new Mutex(Monitor::native, "gtest-IsMetaspaceObjTest-lock", false, Monitor::_safepoint_check_never);
+    {
+      MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+      _ms = new ClassLoaderMetaspace(_lock, StandardMetaspaceType);
+    }
+
+    const MetaspaceObj* p = (MetaspaceObj*) _ms->allocate(42, mdType);
+
+    // Test MetaspaceObj::is_metaspace_object
+    ASSERT_TRUE(MetaspaceObj::is_valid(p));
+
+    // A misaligned object shall not be recognized
+    const MetaspaceObj* p_misaligned = (MetaspaceObj*)((address)p + 1);
+    ASSERT_FALSE(MetaspaceObj::is_valid(p_misaligned));
+
+    // Test VirtualSpaceList::contains
+    const VirtualSpaceList* const vslist =
+        (mdType == ClassType && Metaspace::using_class_space()) ?
+         VirtualSpaceList::vslist_class() : VirtualSpaceList::vslist_nonclass();
+
+    ASSERT_TRUE(vslist->contains((MetaWord*)p));
+
+    // A misaligned pointer shall still be recognized by list::contains
+    ASSERT_TRUE(vslist->contains((MetaWord*)((address)p + 1)));
+
+    // Now for some bogus values
+    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)NULL));
+
+    // Should exercise various paths in MetaspaceObj::is_valid()
+    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)1024));
+    ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)8192));
+
+    MetaspaceObj* p_stack = (MetaspaceObj*) &_lock;
+    ASSERT_FALSE(MetaspaceObj::is_valid(p_stack));
+
+    MetaspaceObj* p_heap = (MetaspaceObj*) os::malloc(41, mtInternal);
+    ASSERT_FALSE(MetaspaceObj::is_valid(p_heap));
+    os::free(p_heap);
+
+    // Test Metaspace::contains_xxx
+    ASSERT_TRUE(Metaspace::contains(p));
+    ASSERT_TRUE(Metaspace::contains_non_shared(p));
+
+    delete _ms;
+    _ms = NULL;
+    delete _lock;
+    _lock = NULL;
+  }
+
+};
+
+TEST_VM(metaspace, is_metaspace_obj_non_class) {
+  MetaspaceIsMetaspaceObjTest test;
+  test.do_test(NonClassType);
+}
+
+TEST_VM(metaspace, is_metaspace_obj_class) {
+  MetaspaceIsMetaspaceObjTest test;
+  test.do_test(ClassType);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_metachunk.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "metaspace/metaspaceTestsCommon.hpp"
+#include "runtime/mutexLocker.hpp"
+
+
+class MetachunkTest {
+
+  CommitLimiter _commit_limiter;
+  VirtualSpaceList _vs_list;
+  ChunkManager _cm;
+
+  Metachunk* alloc_chunk(chklvl_t lvl) {
+
+    Metachunk* c = _cm.get_chunk(lvl, lvl);
+    EXPECT_NOT_NULL(c);
+    EXPECT_EQ(c->level(), lvl);
+    check_chunk(c);
+
+    DEBUG_ONLY(c->verify(true);)
+
+    return c;
+  }
+
+  void check_chunk(const Metachunk* c) const {
+    EXPECT_LE(c->used_words(), c->committed_words());
+    EXPECT_LE(c->committed_words(), c->word_size());
+    EXPECT_NOT_NULL(c->base());
+    EXPECT_TRUE(_vs_list.contains(c->base()));
+    EXPECT_TRUE(is_aligned(c->base(), MAX_CHUNK_BYTE_SIZE));
+    EXPECT_TRUE(is_aligned(c->word_size(), MAX_CHUNK_WORD_SIZE));
+    EXPECT_TRUE(metaspace::chklvl::is_valid_level(c->level()));
+
+    if (c->next() != NULL) EXPECT_EQ(c->next()->prev(), c);
+    if (c->prev() != NULL) EXPECT_EQ(c->prev()->next(), c);
+
+    {
+      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+      if (c->next_in_vs() != NULL) EXPECT_EQ(c->next_in_vs()->prev_in_vs(), c);
+      if (c->prev_in_vs() != NULL) EXPECT_EQ(c->prev_in_vs()->next_in_vs(), c);
+    }
+
+    DEBUG_ONLY(c->verify(true);)
+  }
+
+public:
+
+  MetachunkTest(size_t commit_limit)
+    : _commit_limiter(commit_limit),
+      _vs_list("test_vs_list", &_commit_limiter),
+      _cm("test_cm", &_vs_list)
+  {
+  }
+
+  void test_random_allocs() {
+
+    // Randomly alloc from a chunk until it is full.
+    Metachunk* c = alloc_chunk(LOWEST_CHUNK_LEVEL);
+
+    check_chunk(c);
+
+    EXPECT_TRUE(c->is_in_use());
+    EXPECT_EQ(c->used_words(), (size_t)0);
+
+    // uncommit to start off with uncommitted chunk; then start allocating.
+    c->set_free();
+    c->uncommit();
+    c->set_in_use();
+
+    EXPECT_EQ(c->committed_words(), (size_t)0);
+
+    RandSizeGenerator rgen(1, 256, 0.1f, 1024, 4096); // note: word sizes
+    SizeCounter words_allocated;
+
+    MetaWord* p = NULL;
+    bool did_hit_commit_limit = false;
+    do {
+
+      const size_t alloc_size = align_up(rgen.get(), Metachunk::allocation_alignment_words);
+
+      // Note: net and raw sizes are concepts which only exist at the SpaceManager level.
+      // At the chunk level, which is what we test here, we allocate exactly the number of words we ask for.
+
+      const bool may_hit_commit_limit =
+          _commit_limiter.possible_expansion_words() <= align_up(alloc_size, Settings::commit_granule_words());
+
+      p = c->allocate(alloc_size, &did_hit_commit_limit);
+      LOG("Allocated " SIZE_FORMAT " words, chunk: " METACHUNK_FULL_FORMAT, alloc_size, METACHUNK_FULL_FORMAT_ARGS(c));
+
+      check_chunk(c);
+
+      if (p != NULL) {
+        // From time to time deallocate to test deallocation. Since we do this on the very last allocation,
+        // this should always work.
+        if (os::random() % 100 > 95) {
+          LOG("Test dealloc in place");
+          EXPECT_TRUE(c->attempt_rollback_allocation(p, alloc_size));
+        } else {
+          fill_range_with_pattern(p, (uintx) this, alloc_size); // test that we can access this.
+          words_allocated.increment_by(alloc_size);
+          EXPECT_EQ(c->used_words(), words_allocated.get());
+        }
+      } else {
+        // Allocating from a chunk can only fail for one of two reasons: Either the chunk is full, or
+        // we attempted to increase the chunk's commit region and hit the commit limit.
+        if (did_hit_commit_limit) {
+          EXPECT_TRUE(may_hit_commit_limit);
+        } else {
+          // Chunk is full.
+          EXPECT_LT(c->free_words(), alloc_size);
+        }
+      }
+
+    } while (p != NULL);
+
+    check_range_for_pattern(c->base(), (uintx) this, c->used_words());
+
+    // At the end of the test return the chunk to the chunk manager to
+    // avoid asserts on destruction time.
+    _cm.return_chunk(c);
+
+  }
+
+
+
+};
+
+
+
+TEST_VM(metaspace, metachunk_test_random_allocs_no_commit_limit) {
+
+  // The test only allocates one root chunk and plays with it, so anything
+  // above the size of a root chunk should not hit commit limit.
+  MetachunkTest test(2 * MAX_CHUNK_WORD_SIZE);
+  test.test_random_allocs();
+
+}
+
+TEST_VM(metaspace, metachunk_test_random_allocs_with_commit_limit) {
+
+  // The test allocates one root chunk and plays with it, so a limit smaller
+  // than root chunk size will be hit.
+  MetachunkTest test(MAX_CHUNK_WORD_SIZE / 2);
+  test.test_random_allocs();
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_metaspaceUtils.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "memory/metaspace.hpp"
+#include "unittest.hpp"
+
+
+TEST_VM(MetaspaceUtils, reserved) {
+  size_t reserved = MetaspaceUtils::reserved_bytes();
+  EXPECT_GT(reserved, 0UL);
+
+  size_t reserved_metadata = MetaspaceUtils::reserved_bytes(metaspace::NonClassType);
+  EXPECT_GT(reserved_metadata, 0UL);
+  EXPECT_LE(reserved_metadata, reserved);
+}
+
+TEST_VM(MetaspaceUtils, reserved_compressed_class_pointers) {
+  if (!UseCompressedClassPointers) {
+    return;
+  }
+  size_t reserved = MetaspaceUtils::reserved_bytes();
+  EXPECT_GT(reserved, 0UL);
+
+  size_t reserved_class = MetaspaceUtils::reserved_bytes(metaspace::ClassType);
+  EXPECT_GT(reserved_class, 0UL);
+  EXPECT_LE(reserved_class, reserved);
+}
+
+TEST_VM(MetaspaceUtils, committed) {
+  size_t committed = MetaspaceUtils::committed_bytes();
+  EXPECT_GT(committed, 0UL);
+
+  size_t reserved  = MetaspaceUtils::reserved_bytes();
+  EXPECT_LE(committed, reserved);
+
+  size_t committed_metadata = MetaspaceUtils::committed_bytes(metaspace::NonClassType);
+  EXPECT_GT(committed_metadata, 0UL);
+  EXPECT_LE(committed_metadata, committed);
+}
+
+TEST_VM(MetaspaceUtils, committed_compressed_class_pointers) {
+  if (!UseCompressedClassPointers) {
+    return;
+  }
+  size_t committed = MetaspaceUtils::committed_bytes();
+  EXPECT_GT(committed, 0UL);
+
+  size_t committed_class = MetaspaceUtils::committed_bytes(metaspace::ClassType);
+  EXPECT_GT(committed_class, 0UL);
+  EXPECT_LE(committed_class, committed);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_spacemanager.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+
+#include "precompiled.hpp"
+
+#define LOG_PLEASE
+
+#include "metaspace/metaspaceTestsCommon.hpp"
+
+
+
+// See spaceManager.cpp: needed for predicting commit sizes.
+namespace metaspace {
+  extern size_t get_raw_allocation_word_size(size_t net_word_size);
+}
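+// (The raw allocation size is the net requested size plus whatever overhead the
+//  SpaceManager adds; it is used below to predict whether an allocation may hit
+//  the commit limit.)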
+
+
+
+class SpaceManagerTest {
+
+  // One global chunk manager, with an associated global virtual space list as backing,
+  // and a number of space managers feeding from that manager in parallel.
+  CommitLimiter _commit_limiter;
+
+  const size_t _avg_occupancy;
+
+  VirtualSpaceList* _vslist;
+  ChunkManager* _cm;
+  const ChunkAllocSequence* _alloc_sequence;
+
+  const int _num_spaces;
+
+  size_t _rss_at_start;
+  size_t _rss_at_end;
+  size_t _rss_after_cleanup;
+
+
+  // A little test bed holding one SpaceManager and its lock
+  //  and keeping track of its allocations.
+  struct SpaceManagerTestBed : public CHeapObj<mtInternal> {
+
+    Mutex* _lock;
+    SpaceManager* _sm;
+    int _index;
+
+    // We keep track of individual allocations. Note that this adds
+    // 256K per instance of SpaceManagerTestBed.
+    struct allocation_t {
+      MetaWord* p;
+      size_t word_size;
+    };
+    static const int _max_allocations = 0x4000;
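+    // (0x4000 == 16384 records; at 16 bytes per record on 64-bit this is the
+    //  256K per instance mentioned above.)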
+    int _num_allocations;
+    size_t _words_allocated;
+    allocation_t _allocations[_max_allocations];
+
+    // Note: the used counter tracks "used" from the chunk perspective, which is
+    // used + freelist + alignment corrections. This does not translate 1:1 to
+    // _words_allocated, so it is difficult to test against.
+    SizeAtomicCounter _used_counter;
+
+  public:
+
+    SpaceManager* sm() { return _sm; }
+
+    SpaceManagerTestBed(int index, ChunkManager* cm, const ChunkAllocSequence* alloc_sequence)
+      : _lock(NULL), _sm(NULL), _index(index), _num_allocations(0), _words_allocated(0)
+    {
+      memset(_allocations, 0, sizeof(_allocations));
+      _lock = new Mutex(Monitor::native, "gtest-SpaceManagerTestBed-lock", false, Monitor::_safepoint_check_never);
+      {
+        // Take the lock during space creation, since this is what happens in the VM too
+        // (see ClassLoaderData::metaspace_non_null(), which we mimic here).
+        MutexLocker ml(_lock,  Mutex::_no_safepoint_check_flag);
+        _sm = new SpaceManager(cm, alloc_sequence, _lock, &_used_counter, "gtest-SpaceManagerTestBed-sm");
+      }
+    }
+
+    ~SpaceManagerTestBed() {
+
+      // EXPECT_EQ(_used_counter.get(), _words_allocated); // see note on _used_counter above
+
+      // Iterate through all allocation records and check their content first.
+      for (int i = 0; i < _num_allocations; i ++) {
+        const allocation_t* al = _allocations + i;
+        EXPECT_TRUE(al->p == NULL || check_marked_range(al->p, (uintx)this, al->word_size));
+      }
+
+      // Delete SpaceManager. That should clean up all metaspace.
+      delete _sm;
+      delete _lock;
+
+    }
+
+
+    size_t words_allocated() const        { return _words_allocated; }
+    int num_allocations() const           { return _num_allocations; }
+
+    int index() const                     { return _index; }
+
+    bool is_full() const                  { return _num_allocations == _max_allocations; }
+    bool is_empty() const                 { return _num_allocations == 0; }
+
+    // Allocate from space. Returns NULL if either the bed is full or if the allocation
+    // itself failed.
+    MetaWord* allocate_and_test(size_t word_size) {
+      if (is_full()) {
+        return NULL;
+      }
+      MetaWord* p = _sm->allocate(word_size);
+      if (p != NULL) {
+        EXPECT_TRUE(is_aligned(p, sizeof(void*)));
+        mark_range(p, (uintx)this, word_size);
+        // Remember this allocation.
+        allocation_t* al = _allocations + _num_allocations;
+        al->p = p; al->word_size = word_size;
+        _num_allocations ++;
+        _words_allocated += word_size;
+        // EXPECT_EQ(_used_counter.get(), _words_allocated); // see note on _used_counter above
+      }
+      return p;
+    }
+
+    // Deallocate the last allocation done.
+    void deallocate_last() {
+      assert(_num_allocations > 0, "Sanity");
+      allocation_t* al = &_allocations[_num_allocations - 1];
+      _sm->deallocate(al->p, al->word_size);
+      al->p = NULL;
+      _num_allocations --;
+    }
+
+    // Deallocate a random single allocation.
+    void deallocate_random() {
+      if (_num_allocations > 0) {
+        int idx = os::random() % _num_allocations;
+        allocation_t* al = _allocations + idx;
+        if (al->p == NULL) {
+          // Already deallocated? It should still have its old word size set.
+          assert(al->word_size > 0, "Sanity");
+        } else {
+          _sm->deallocate(al->p, al->word_size);
+          al->p = NULL; // but leave word_size, see above
+        }
+      }
+    }
+
+  }; // End: SpaceManagerTestBed
+
+  SpaceManagerTestBed** _testbeds;
+
+  SpaceManagerTestBed* testbed_at(int index) {
+    assert(index < _num_spaces, "Sanity");
+    // Create on the fly if necessary.
+    if (_testbeds[index] == NULL) {
+      _testbeds[index] = new SpaceManagerTestBed(index, _cm, _alloc_sequence);
+    }
+    return _testbeds[index];
+  }
+
+  SpaceManagerTestBed* next_testbed(SpaceManagerTestBed* bed) {
+    int i = bed->index() + 1;
+    if (i == _num_spaces) {
+      i = 0;
+    }
+    return testbed_at(i);
+  }
+
+  SpaceManagerTestBed* get_random_testbed() {
+    const int index = os::random() % _num_spaces;
+    return testbed_at(index);
+  }
+
+  SpaceManagerTestBed* get_random_matching_testbed(bool should_be_empty) {
+    const int start_index = os::random() % _num_spaces;
+    int i = start_index;
+    do {
+      SpaceManagerTestBed* bed = testbed_at(i);
+      if ((should_be_empty && bed->words_allocated() == 0) ||
+          (!should_be_empty && bed->words_allocated() > 0)) {
+        return bed;
+      }
+      i ++;
+      if (i == _num_spaces) {
+        i = 0;
+      }
+    } while (i != start_index);
+    return NULL;
+  }
+
+  SpaceManagerTestBed* get_random_nonempty_testbed() {
+    return get_random_matching_testbed(false);
+  }
+
+  SpaceManagerTestBed* get_random_empty_testbed() {
+    return get_random_matching_testbed(true);
+  }
+
+  MetaWord* alloc_from_testbed(SpaceManagerTestBed* bed, size_t word_size) {
+    MetaWord* p = bed->allocate_and_test(word_size);
+    if (p == NULL) {
+      // Getting NULL back may mean:
+      // - testbed is full.
+      // - We hit the commit limit.
+      if (!bed->is_full()) {
+        EXPECT_LT(_commit_limiter.possible_expansion_words(),
+                  metaspace::get_raw_allocation_word_size(word_size));
+      }
+    }
+    return p;
+  }
+
+  void delete_testbed_at(int index) {
+    delete _testbeds[index];
+    _testbeds[index] = NULL;
+  }
+
+  void delete_testbed(SpaceManagerTestBed* bed) {
+    assert(_testbeds[bed->index()] == bed, "Sanity");
+    _testbeds[bed->index()] = NULL;
+    delete bed;
+  }
+
+  // Allocate random sizes multiple times from a single space manager.
+  // Stops allocating prematurely if the per-space maximum or the commit limit is reached.
+  bool allocate_multiple_random(SpaceManagerTestBed* bed, int num_allocations, RandSizeGenerator* rgen) {
+    for (int n = 0; n < num_allocations; n ++) {
+      const size_t alloc_size = rgen->get();
+      if (alloc_from_testbed(bed, alloc_size) == NULL) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  int get_total_number_of_allocations() const {
+    int sum = 0;
+    for (int i = 0; i < _num_spaces; i ++) {
+      if (_testbeds[i] != NULL) {
+        sum += _testbeds[i]->num_allocations();
+      }
+    }
+    return sum;
+  }
+
+  size_t get_total_words_allocated() const {
+    size_t sum = 0;
+    for (int i = 0; i < _num_spaces; i ++) {
+      if (_testbeds[i] != NULL) {
+        sum += _testbeds[i]->words_allocated();
+      }
+    }
+    return sum;
+  }
+
+  // Allocate until average occupancy is reached, then hover there by allocating and freeing.
+  void test_hover(int num_cycles, int avg_allocs_per_space_manager, RandSizeGenerator* rgen,
+                  bool exercise_reclaim, bool exercise_dealloc) {
+
+    int alloc_cycles = 0;
+    int free_cycles = 0;
+    for (int cyc = 0; cyc < num_cycles; cyc ++) {
+      if (get_total_words_allocated() < _avg_occupancy) {
+        SpaceManagerTestBed* bed = get_random_testbed();
+        if (allocate_multiple_random(bed, avg_allocs_per_space_manager, rgen)) {
+          alloc_cycles ++;
+        }
+      } else {
+        SpaceManagerTestBed* bed = get_random_nonempty_testbed();
+        if (bed != NULL) {
+          free_cycles ++;
+          delete_testbed(bed);
+        }
+      }
+      if (exercise_dealloc) {
+        if (os::random() % 100 > 95) {
+          SpaceManagerTestBed* bed = get_random_nonempty_testbed();
+          if (bed != NULL) {
+            bed->deallocate_random();
+          }
+        }
+      }
+      if (cyc % 100 == 0) {
+        const size_t committed_before = _vslist->committed_words();
+        if (exercise_reclaim) {
+          _cm->wholesale_reclaim();
+        }
+        LOG("cyc: %d (a %d f %d) allocated: " SIZE_FORMAT ", committed " SIZE_FORMAT "->" SIZE_FORMAT ".",
+            cyc, alloc_cycles, free_cycles, get_total_words_allocated(), committed_before, _vslist->committed_words());
+      }
+    }
+
+//    _vslist->print_on(tty);
+//    _cm->print_on(tty);
+
+  } // end: test_hover
+
+  // Allocate until you reach avg occupancy, then drain completely. Repeat.
+  void test_wave(int num_cycles, int avg_allocs_per_space_manager, RandSizeGenerator* rgen,
+                 bool exercise_reclaim, bool exercise_dealloc) {
+
+    bool rising = true;
+    int num_waves = 1; // we start inside the first rising phase
+    for (int cyc = 0; cyc < num_cycles; cyc ++) {
+      if (rising) {
+        if (get_total_words_allocated() >= _avg_occupancy) {
+          rising = false;
+        } else {
+          SpaceManagerTestBed* bed = get_random_testbed();
+          allocate_multiple_random(bed, avg_allocs_per_space_manager, rgen);
+        }
+      } else {
+        SpaceManagerTestBed* bed = get_random_nonempty_testbed();
+        if (bed == NULL) {
+          EXPECT_EQ(get_total_words_allocated(), (size_t)0);
+          rising = true;
+          num_waves ++;
+        } else {
+          delete_testbed(bed);
+        }
+      }
+      if (exercise_dealloc) {
+        if (os::random() % 100 > 95) {
+          SpaceManagerTestBed* bed = get_random_nonempty_testbed();
+          if (bed != NULL) {
+            bed->deallocate_random();
+          }
+        }
+      }
+      if (cyc % 100 == 0) {
+        LOG("cyc: %d num waves: %d num allocations: %d , words allocated: " SIZE_FORMAT ", committed " SIZE_FORMAT ".",
+            cyc, num_waves, get_total_number_of_allocations(), get_total_words_allocated(), _vslist->committed_words());
+        const size_t committed_before = _vslist->committed_words();
+        if (exercise_reclaim) {
+          _cm->wholesale_reclaim();
+          LOG(".. reclaim: " SIZE_FORMAT "->" SIZE_FORMAT ".", committed_before, _vslist->committed_words());
+        }
+      }
+    }
+
+//    _vslist->print_on(tty);
+    //    _cm->print_on(tty);
+
+  } // end: test_wave
+
+
+  static void check_sm_stat_is_empty(sm_stats_t& stat) {
+    in_use_chunk_stats_t totals = stat.totals();
+    EXPECT_EQ(totals.word_size, (size_t)0);
+    EXPECT_EQ(totals.committed_words, (size_t)0);
+    EXPECT_EQ(totals.used_words, (size_t)0);
+    EXPECT_EQ(totals.free_words, (size_t)0);
+    EXPECT_EQ(totals.waste_words, (size_t)0);
+  }
+
+  static void check_sm_stat_is_consistent(sm_stats_t& stat) {
+    in_use_chunk_stats_t totals = stat.totals();
+    EXPECT_GE(totals.word_size, totals.committed_words);
+    EXPECT_EQ(totals.committed_words, totals.used_words + totals.free_words + totals.waste_words);
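+    // (Deallocated blocks kept in the free block list still count as "used" from
+    //  the chunk perspective, hence used_words >= free_blocks_word_size.)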
+    EXPECT_GE(totals.used_words, stat.free_blocks_word_size);
+  }
+
+  void test_total_statistics() {
+    sm_stats_t totals1;
+    check_sm_stat_is_empty(totals1);
+    sm_stats_t totals2;
+    check_sm_stat_is_empty(totals2);
+    for (int i = 0; i < _num_spaces; i ++) {
+      if (_testbeds[i] != NULL) {
+        sm_stats_t stat;
+        _testbeds[i]->_sm->add_to_statistics(&stat);
+        check_sm_stat_is_consistent(stat);
+        DEBUG_ONLY(stat.verify());
+        _testbeds[i]->_sm->add_to_statistics(&totals1);
+        check_sm_stat_is_consistent(totals1);
+        totals2.add(stat);
+        check_sm_stat_is_consistent(totals2);
+      }
+    }
+    EXPECT_EQ(totals1.totals().used_words,
+              totals2.totals().used_words);
+  }
+
+public:
+
+  void run_test(int num_cycles, int avg_allocs_per_space_manager, RandSizeGenerator* rgen,
+            bool exercise_reclaim, bool exercise_dealloc) {
+    LOG("hover test");
+    test_hover(num_cycles, avg_allocs_per_space_manager, rgen, exercise_reclaim, exercise_dealloc);
+
+    test_total_statistics();
+
+    LOG("wave test");
+    test_wave(num_cycles, avg_allocs_per_space_manager, rgen, exercise_reclaim, exercise_dealloc);
+
+    test_total_statistics();
+
+  }
+
+  SpaceManagerTest(int num_spaces, size_t avg_occupancy, size_t max_commit_limit, const ChunkAllocSequence* alloc_sequence)
+    : _commit_limiter(max_commit_limit), _avg_occupancy(avg_occupancy), _vslist(NULL), _cm(NULL),
+      _alloc_sequence(alloc_sequence), _num_spaces(num_spaces),
+      _rss_at_start(0), _rss_at_end(0), _rss_after_cleanup(0),
+      _testbeds(NULL)
+  {
+    _rss_at_start = get_workingset_size();
+
+    // Allocate test bed array
+    _testbeds = NEW_C_HEAP_ARRAY(SpaceManagerTestBed*, _num_spaces, mtInternal);
+    for (int i = 0; i < _num_spaces; i ++) {
+      _testbeds[i] = NULL;
+    }
+
+    // Create VirtualSpaceList and ChunkManager as backing memory
+    _vslist = new VirtualSpaceList("test_vs", &_commit_limiter);
+
+    _cm = new ChunkManager("test_cm", _vslist);
+
+  }
+
+  ~SpaceManagerTest() {
+
+    _rss_at_end = get_workingset_size();
+
+    // Is the memory footprint abnormal? This is necessarily very fuzzy. The memory footprint of
+    // these tests is dominated by the metaspace allocations done and by the number of spaces, since
+    // each SpaceManagerTestBed is rather big due to the individual allocations it tracks.
+    const size_t reasonable_expected_footprint = _avg_occupancy * BytesPerWord +
+                                                  sizeof(SpaceManagerTestBed) * _num_spaces +
+                                                  sizeof(SpaceManagerTestBed*) * _num_spaces +
+                                                  sizeof(ChunkManager) + sizeof(VirtualSpaceList);
+    const size_t reasonable_expected_footprint_with_margin =
+        (reasonable_expected_footprint * 2) + 1 * M;
+    EXPECT_LE(_rss_at_end, _rss_at_start + reasonable_expected_footprint_with_margin);
+
+    for (int i = 0; i < _num_spaces; i ++) {
+      delete _testbeds[i];
+    }
+
+    FREE_C_HEAP_ARRAY(SpaceManagerTestBed*, _testbeds);
+
+    delete _cm;
+    delete _vslist;
+
+    _rss_after_cleanup = get_workingset_size();
+
+    // Check for memory leaks. We should ideally be back at the baseline of _rss_at_start. However,
+    // this depends on whether this gtest was executed as the first test in the suite, since the
+    // gtest framework itself adds 2-4 MB of overhead.
+    EXPECT_LE(_rss_after_cleanup, _rss_at_start + 4 * M);
+
+    LOG("rss at start: " INTX_FORMAT ", at end " INTX_FORMAT " (" INTX_FORMAT "), after cleanup: " INTX_FORMAT " (" INTX_FORMAT ").", \
+        _rss_at_start, _rss_at_end, _rss_at_end - _rss_at_start, _rss_after_cleanup, _rss_after_cleanup - _rss_at_start); \
+
+  }
+
+
+
+  void test_deallocation_in_place() {
+
+    // When deallocating, we attempt to deallocate in place: if the allocation
+    // is the most recent one, the usage pointer in the current chunk is simply
+    // rolled back to its position before that allocation.
+    //
+    // However, in-place deallocation will not undo the allocation of the
+    // current chunk itself, even if its usage pointer drops back to 0.
+    //
+    // In theory, allocating n times and then deallocating in reverse order
+    // should happen entirely in place, leaving the SpaceManager's usage
+    // counter at its original value.
+    // In practice this is fragile: if one of the allocations happens to
+    // retire the current chunk and open a new one, the chain breaks at that
+    // point (one cannot deallocate in place from a non-current chunk).
+    //
+    // Therefore, to make this test reliable, we work on a new, empty testbed
+    // - so we have a fresh chunk - and use minuscule allocation sizes that stay
+    // below the smallest possible chunk size. That way the initial chunk is
+    // never retired, regardless of how small it is.
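+    //
+    // Example: allocate p1, p2 and p3 in this order; deallocating p3, then p2,
+    // then p1 rolls the usage pointer back step by step to its starting position.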
+
+    delete_testbed_at(0);
+    SpaceManagerTestBed* bed = testbed_at(0); // new bed
+
+    const int num_loops = 5;
+    const size_t max_alloc_size = metaspace::chklvl::MIN_CHUNK_WORD_SIZE / num_loops * 2;
+
+    // Doing this multiple times should work too, as long as we deallocate in
+    // reverse allocation order.
+    sm_stats_t stat[10]; // taken before each allocation
+    size_t alloc_sizes[10] = {
+        max_alloc_size,
+        1, 2, 3, // <- small sizes, for which raw size and net size differ
+        0, 0, 0, 0, 0, 0 // <- 0 means: use a random size
+    };
+
+    RandSizeGenerator rgen(1, max_alloc_size);
+    int i = 0;
+    while (i < 10) {
+      // take stats before allocating...
+      bed->sm()->add_to_statistics(stat + i);
+      check_sm_stat_is_consistent(stat[i]);
+
+      // and allocate.
+      LOG("alloc round #%d (used: " SIZE_FORMAT ").", i, stat[i].totals().used_words);
+      const size_t alloc_size = alloc_sizes[i] > 0 ? alloc_sizes[i] : rgen.get();
+      MetaWord* p = bed->allocate_and_test(alloc_size);
+      ASSERT_NOT_NULL(p);
+      i ++;
+    }
+
+    // Now reverse-deallocate and compare used_words while doing so.
+    // (Note: used_words should be the same after deallocating as before the
+    //  original allocation. All other stats cannot be relied upon to stay the same.)
+    i --;
+    while (i >= 0) {
+      // dealloc, measure statistics after each deallocation and compare with
+      // the statistics taken at allocation time.
+      LOG("dealloc round #%d", i);
+      bed->deallocate_last();
+      sm_stats_t stat_now;
+      bed->sm()->add_to_statistics(&stat_now);
+      check_sm_stat_is_consistent(stat_now);
+      ASSERT_EQ(stat_now.totals().used_words,
+                stat[i].totals().used_words);
+      i --;
+    }
+
+  } // end: test_deallocation_in_place
+
+};
+
+// For convenience, some shorthands for standard alloc sequences.
+static const ChunkAllocSequence* const g_standard_allocseq_class =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::StandardMetaspaceType, true);
+static const ChunkAllocSequence* const g_standard_allocseq_nonclass =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::StandardMetaspaceType, false);
+static const ChunkAllocSequence* const g_boot_allocseq_class =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::BootMetaspaceType, true);
+static const ChunkAllocSequence* const g_boot_allocseq_nonclass =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::BootMetaspaceType, false);
+static const ChunkAllocSequence* const g_refl_allocseq_class =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::ReflectionMetaspaceType, true);
+static const ChunkAllocSequence* const g_refl_allocseq_nonclass =
+    ChunkAllocSequence::alloc_sequence_by_space_type(metaspace::ReflectionMetaspaceType, false);
+
+// Some standard random size gens.
+
+// generates sizes between 1 and 128 words.
+static RandSizeGenerator rgen_1K_no_outliers(1, 128);
+
+// generates sizes between 1 and 256 words, small chance of large outliers
+static RandSizeGenerator rgen_1K_some_huge_outliers(1, 256, 0.05f, MAX_CHUNK_WORD_SIZE / 64, MAX_CHUNK_WORD_SIZE / 2);
+
+// generates medium-sized allocation sizes
+static RandSizeGenerator rgen_32K_no_outliers(128, 0x4000);
+
+// large (and pretty unrealistic) spread
+static RandSizeGenerator rgen_large_spread(1, MAX_CHUNK_WORD_SIZE);
+
+
+
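+// Parameters: test name, number of space managers, average occupancy, commit limit,
+// chunk alloc sequence, random size generator, whether to exercise wholesale reclaim,
+// and whether to exercise explicit deallocation.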
+#define TEST_WITH_PARAMS(name, num_spaces, avg_occ, commit_limit, alloc_seq, rgen, exercise_reclaim, exercise_dealloc) \
+TEST_VM(metaspace, space_manager_test_##name) { \
+  SpaceManagerTest stest(num_spaces, avg_occ, commit_limit, alloc_seq); \
+  stest.run_test(1000, 50, &rgen, exercise_reclaim, exercise_dealloc); \
+}
+
+TEST_WITH_PARAMS(test0, 1, 64 * K, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_no_outliers, true, false);
+
+TEST_WITH_PARAMS(test1, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_no_outliers, true, false);
+TEST_WITH_PARAMS(test2, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_no_outliers, false, true);
+TEST_WITH_PARAMS(test3, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_no_outliers, false, false);
+
+TEST_WITH_PARAMS(test4, 10, 1 * M, SIZE_MAX, g_boot_allocseq_nonclass, rgen_1K_no_outliers, true, false);
+TEST_WITH_PARAMS(test5, 10, 1 * M, SIZE_MAX, g_boot_allocseq_nonclass, rgen_1K_no_outliers, false, false);
+
+TEST_WITH_PARAMS(test6, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_some_huge_outliers, true, false);
+TEST_WITH_PARAMS(test7, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_1K_some_huge_outliers, false, false);
+
+TEST_WITH_PARAMS(test8, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_32K_no_outliers, true, false);
+TEST_WITH_PARAMS(test9, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_32K_no_outliers, false, false);
+
+TEST_WITH_PARAMS(test10,  10, 10 * M, 2 * M, g_standard_allocseq_nonclass, rgen_1K_some_huge_outliers, true, false);
+TEST_WITH_PARAMS(test11,  10, 10 * M, 2 * M, g_standard_allocseq_nonclass, rgen_1K_some_huge_outliers, false, false);
+
+TEST_WITH_PARAMS(test12,  10, 10 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_large_spread, true, false);
+TEST_WITH_PARAMS(test13,  10, 10 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_large_spread, false, false);
+
+TEST_WITH_PARAMS(test14, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_32K_no_outliers, true, true);
+TEST_WITH_PARAMS(test15, 10, 1 * M, SIZE_MAX, g_standard_allocseq_nonclass, rgen_large_spread, true, false);
+
+
+TEST_VM(metaspace, space_manager_test_deallocation_in_place) {
+  // Test in-place deallocation (the 2 * M commit limit should not be hit here).
+  SpaceManagerTest stest(1, 1 * M, 2 * M, g_boot_allocseq_class);
+  stest.test_deallocation_in_place();
+}
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/metaspace/test_virtualspacenode.cpp	Wed Oct 16 17:41:03 2019 +0200
@@ -0,0 +1,524 @@
+/*
+ * Copyright (c) 2019, SAP SE. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+#include "precompiled.hpp"
+#include "metaspace/metaspaceTestsCommon.hpp"
+
+static int test_node_id = 100000; // start high to make it stick out in logs.
+
+
+
+class VirtualSpaceNodeTest {
+
+  // These counters are updated by the Node.
+  SizeCounter _counter_reserved_words;
+  SizeCounter _counter_committed_words;
+  CommitLimiter _commit_limiter;
+  VirtualSpaceNode* _node;
+
+  // These are our own checks and counters.
+  const size_t _vs_word_size;
+  const size_t _commit_limit;
+
+  MetachunkList _root_chunks;
+
+  void verify() const {
+
+    ASSERT_EQ(_root_chunks.size() * metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+              _node->used_words());
+
+    ASSERT_GE(_commit_limit,                      _counter_committed_words.get());
+    ASSERT_EQ(_commit_limiter.committed_words(),  _counter_committed_words.get());
+
+    // Since we know _counter_committed_words serves our single node alone, the counter has to
+    // match the node's own committed word count (derived from its internal commit mask).
+    ASSERT_EQ(_counter_committed_words.get(), _node->committed_words());
+
+    ASSERT_EQ(_counter_reserved_words.get(), _vs_word_size);
+    ASSERT_EQ(_counter_reserved_words.get(), _node->word_size());
+
+  }
+
+  void lock_and_verify_node() {
+#ifdef ASSERT
+    MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+    _node->verify(true);
+#endif
+  }
+
+  Metachunk* alloc_root_chunk() {
+
+    verify();
+
+    const bool node_is_full = _node->used_words() == _node->word_size();
+    bool may_hit_commit_limit = _commit_limiter.possible_expansion_words() < MAX_CHUNK_WORD_SIZE;
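+    // (Whether a freshly allocated root chunk commits memory up front depends on
+    //  Settings::newborn_root_chunks_are_fully_committed(); see the checks below.)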
+    Metachunk* c = NULL;
+    {
+      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+      c = _node->allocate_root_chunk();
+    }
+
+    lock_and_verify_node();
+
+    if (node_is_full) {
+
+      EXPECT_NULL(c);
+
+    } else {
+
+      EXPECT_NOT_NULL(c);
+      DEBUG_ONLY(c->verify(true);)
+      EXPECT_TRUE(c->is_root_chunk());
+      EXPECT_TRUE(c->is_free());
+      EXPECT_EQ(c->word_size(), metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+
+      if (!may_hit_commit_limit) {
+        if (Settings::newborn_root_chunks_are_fully_committed()) {
+          EXPECT_TRUE(c->is_fully_committed());
+        } else {
+          EXPECT_TRUE(c->is_fully_uncommitted());
+        }
+      }
+
+      EXPECT_TRUE(_node->contains(c->base()));
+
+      _root_chunks.add(c);
+
+    }
+
+    verify();
+
+    return c;
+
+  }
+
+  bool commit_root_chunk(Metachunk* c, size_t request_commit_words) {
+
+    verify();
+
+    const size_t committed_words_before = _counter_committed_words.get();
+
+    bool rc = c->ensure_committed(request_commit_words);
+
+    verify();
+    DEBUG_ONLY(c->verify(true);)
+
+    lock_and_verify_node();
+
+    if (rc == false) {
+
+      // We must have hit the commit limit.
+      EXPECT_GE(committed_words_before + request_commit_words, _commit_limit);
+
+    } else {
+
+      // We should not have hit the commit limit.
+      EXPECT_LE(_counter_committed_words.get(), _commit_limit);
+
+      // We do not know how much we really committed - maybe nothing if the
+      // chunk had been committed before - but we know the numbers should have
+      // risen or at least stayed equal.
+      EXPECT_GE(_counter_committed_words.get(), committed_words_before);
+
+      // The chunk should be as far committed as was requested
+      EXPECT_GE(c->committed_words(), request_commit_words);
+
+      // Zap committed portion.
+      DEBUG_ONLY(zap_range(c->base(), c->committed_words());)
+
+    }
+
+    verify();
+
+    return rc;
+
+  } // commit_root_chunk
+
+  void uncommit_chunk(Metachunk* c) {
+
+    verify();
+
+    const size_t committed_words_before = _counter_committed_words.get();
+    const size_t available_words_before = _commit_limiter.possible_expansion_words();
+
+    c->uncommit();
+
+    DEBUG_ONLY(c->verify(true);)
+
+    lock_and_verify_node();
+
+    EXPECT_EQ(c->committed_words(), (size_t)0);
+
+    // The commit counter should have gone down (by exactly the size of the chunk) if the
+    // chunk is at least as large as a commit granule.
+    // For smaller chunks we do not know, but at least the committed size should not
+    // have gone up.
+    if (c->word_size() >= Settings::commit_granule_words()) {
+
+      EXPECT_EQ(_counter_committed_words.get(), committed_words_before - c->word_size());
+
+      // also, commit number in commit limiter should have gone down, so we have more space
+      EXPECT_EQ(_commit_limiter.possible_expansion_words(),
+                available_words_before + c->word_size());
+
+    } else {
+
+      EXPECT_LE(_counter_committed_words.get(), committed_words_before);
+
+    }
+
+    verify();
+
+  } // uncommit_chunk
+
+  Metachunk* split_chunk_with_checks(Metachunk* c, chklvl_t target_level, MetachunkListCluster* freelist) {
+
+    DEBUG_ONLY(c->verify(true);)
+
+    const chklvl_t orig_level = c->level();
+    assert(orig_level < target_level, "Sanity");
+    DEBUG_ONLY(metaspace::chklvl::check_valid_level(target_level);)
+
+    const int total_num_chunks_in_freelist_before = freelist->total_num_chunks();
+    const size_t total_word_size_in_freelist_before = freelist->total_word_size();
+
+   // freelist->print_on(tty);
+
+    // Split...
+    Metachunk* result = NULL;
+    {
+      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+      result = _node->split(target_level, c, freelist);
+    }
+
+    // freelist->print_on(tty);
+
+    EXPECT_NOT_NULL(result);
+    EXPECT_EQ(result->level(), target_level);
+    EXPECT_TRUE(result->is_free());
+
+    // ... check that we get the proper number of splinters. For every level we split down,
+    // one free buddy chunk of that level (half the size of its parent) should appear.
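+    // (E.g., splitting a chunk from level L down to level L+3 should leave one free
+    //  buddy each at levels L+1, L+2 and L+3 in the freelist.)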
+    size_t expected_wordsize_increase = 0;
+    int expected_num_chunks_increase = 0;
+    for (chklvl_t l = orig_level + 1; l <= target_level; l ++) {
+      expected_wordsize_increase += metaspace::chklvl::word_size_for_level(l);
+      expected_num_chunks_increase ++;
+    }
+
+    const int total_num_chunks_in_freelist_after = freelist->total_num_chunks();
+    const size_t total_word_size_in_freelist_after = freelist->total_word_size();
+
+    EXPECT_EQ(total_num_chunks_in_freelist_after, total_num_chunks_in_freelist_before + expected_num_chunks_increase);
+    EXPECT_EQ(total_word_size_in_freelist_after, total_word_size_in_freelist_before + expected_wordsize_increase);
+
+    return result;
+
+  } // end: split_chunk_with_checks
+
+
+  Metachunk* merge_chunk_with_checks(Metachunk* c, chklvl_t expected_target_level, MetachunkListCluster* freelist) {
+
+    const chklvl_t orig_level = c->level();
+    assert(expected_target_level < orig_level, "Sanity");
+
+    const int total_num_chunks_in_freelist_before = freelist->total_num_chunks();
+    const size_t total_word_size_in_freelist_before = freelist->total_word_size();
+
+    //freelist->print_on(tty);
+
+    Metachunk* result = NULL;
+    {
+      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+      result = _node->merge(c, freelist);
+    }
+    EXPECT_NOT_NULL(result);
+    EXPECT_EQ(result->level(), expected_target_level);
+
+    //freelist->print_on(tty);
+
+    // ... check that we merged in the proper number of chunks. For every level the original
+    // chunk decreases (each one a size doubling), one buddy chunk should be swallowed up.
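+    // (E.g., merging a chunk from level L+3 back up to level L should swallow one
+    //  buddy each at levels L+3, L+2 and L+1.)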
+    size_t expected_wordsize_decrease = 0;
+    int expected_num_chunks_decrease = 0;
+    for (chklvl_t l = orig_level; l > expected_target_level; l --) {
+      expected_wordsize_decrease += metaspace::chklvl::word_size_for_level(l);
+      expected_num_chunks_decrease ++;
+    }
+
+    const int total_num_chunks_in_freelist_after = freelist->total_num_chunks();
+    const size_t total_word_size_in_freelist_after = freelist->total_word_size();
+
+    EXPECT_EQ(total_num_chunks_in_freelist_after, total_num_chunks_in_freelist_before - expected_num_chunks_decrease);
+    EXPECT_EQ(total_word_size_in_freelist_after, total_word_size_in_freelist_before - expected_wordsize_decrease);
+
+    return result;
+
+  } // end: merge_chunk_with_checks
+
+public:
+
+  VirtualSpaceNodeTest(size_t vs_word_size, size_t commit_limit)
+    : _counter_reserved_words(), _counter_committed_words(), _commit_limiter(commit_limit),
+      _node(NULL), _vs_word_size(vs_word_size), _commit_limit(commit_limit)
+  {
+    {
+      MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+      _node = VirtualSpaceNode::create_node(test_node_id++,
+                                            vs_word_size, &_commit_limiter,
+                                            &_counter_reserved_words, &_counter_committed_words);
+    }
+    EXPECT_TRUE(_commit_limiter.possible_expansion_words() == _commit_limit);
+    verify();
+  }
+
+  ~VirtualSpaceNodeTest() {
+    delete _node;
+  }
+
+  void test_simple() {
+    Metachunk* c = alloc_root_chunk();
+    commit_root_chunk(c, Settings::commit_granule_words());
+    commit_root_chunk(c, c->word_size());
+    uncommit_chunk(c);
+  }
+
+  void test_exhaust_node() {
+    Metachunk* c = NULL;
+    bool rc = true;
+    do {
+      c = alloc_root_chunk();
+      if (c != NULL) {
+        rc = commit_root_chunk(c, c->word_size());
+      }
+    } while (c != NULL && rc);
+  }
+
+  void test_arbitrary_commits() {
+
+    assert(_commit_limit >= _vs_word_size, "This test expects no effective commit limit.");
+
+    // Get a root chunk to have a committable region
+    Metachunk* c = alloc_root_chunk();
+    ASSERT_NOT_NULL(c);
+
+    if (c->committed_words() > 0) {
+      c->uncommit();
+    }
+
+    ASSERT_EQ(_node->committed_words(), (size_t)0);
+    ASSERT_EQ(_counter_committed_words.get(), (size_t)0);
+
+    TestMap testmap(c->word_size());
+    assert(testmap.get_num_set() == 0, "Sanity");
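+    // The testmap mirrors the expected commit state of the chunk region, one bit per
+    // word; its set-bit count is checked against the real commit counters below.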
+
+    for (int run = 0; run < 1000; run ++) {
+
+      const size_t committed_words_before = testmap.get_num_set();
+      ASSERT_EQ(_commit_limiter.committed_words(), committed_words_before);
+      ASSERT_EQ(_counter_committed_words.get(), committed_words_before);
+
+      range_t range;
+      calc_random_range(c->word_size(), &range, Settings::commit_granule_words());
+
+
+      if (os::random() % 100 < 50) {
+
+        bool rc = false;
+        {
+          MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+          rc = _node->ensure_range_is_committed(c->base() + range.from, range.to - range.from);
+        }
+
+        // Test-zap
+        zap_range(c->base() + range.from, range.to - range.from);
+
+        // We should never hit the commit limit, since it is as large as the whole area.
+        ASSERT_TRUE(rc);
+
+        testmap.set_range(range.from, range.to);
+
+      } else {
+
+        {
+          MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+          _node->uncommit_range(c->base() + range.from, range.to - range.from);
+        }
+
+        testmap.clear_range(range.from, range.to);
+
+      }
+
+      const size_t committed_words_after = testmap.get_num_set();
+
+      ASSERT_EQ(_commit_limiter.committed_words(), committed_words_after);
+      ASSERT_EQ(_counter_committed_words.get(), committed_words_after);
+
+      verify();
+    }
+  }
+
+  // Helper function for test_splitting_chunks_1
+  static void check_chunk_is_committed_at_least_up_to(const Metachunk* c, size_t& word_size) {
+    if (word_size >= c->word_size()) {
+      EXPECT_TRUE(c->is_fully_committed());
+      word_size -= c->word_size();
+    } else {
+      EXPECT_EQ(c->committed_words(), word_size);
+      word_size = 0; // clear remaining size, if any.
+    }
+  }
+
+  void test_split_and_merge_chunks() {
+
+    assert(_commit_limit >= _vs_word_size, "This test expects no effective commit limit.");
+
+    // Allocate a root chunk and commit a random part of it. Then repeatedly split
+    // it and merge it back together; observe the committed regions of the split chunks.
+
+    Metachunk* c = alloc_root_chunk();
+
+    if (c->committed_words() > 0) {
+      c->uncommit();
+    }
+
+    // To capture split-off chunks. Note: it is okay to use this here as a temp object.
+    MetachunkListCluster freelist;
+
+    const int granules_per_root_chunk = (int)(c->word_size() / Settings::commit_granule_words());
+
+    for (int granules_to_commit = 0; granules_to_commit < granules_per_root_chunk; granules_to_commit ++) {
+
+      const size_t words_to_commit = Settings::commit_granule_words() * granules_to_commit;
+
+      c->ensure_committed(words_to_commit);
+
+      ASSERT_EQ(c->committed_words(), words_to_commit);
+      ASSERT_EQ(_counter_committed_words.get(), words_to_commit);
+      ASSERT_EQ(_commit_limiter.committed_words(), words_to_commit);
+
+      const size_t committed_words_before = c->committed_words();
+
+      verify();
+
+      for (chklvl_t target_level = LOWEST_CHUNK_LEVEL + 1;
+           target_level <= HIGHEST_CHUNK_LEVEL; target_level ++) {
+
+        // Split:
+        Metachunk* c2 = split_chunk_with_checks(c, target_level, &freelist);
+        c2->set_in_use();
+
+        // Split smallest leftover chunk.
+        if (c2->level() < HIGHEST_CHUNK_LEVEL) {
+
+          Metachunk* c3 = freelist.remove_first(c2->level());
+          ASSERT_NOT_NULL(c3); // Must exist since c2 must have a splinter buddy now.
+
+          Metachunk* c4 = split_chunk_with_checks(c3, HIGHEST_CHUNK_LEVEL, &freelist);
+          c4->set_in_use();
+
+          // Merge it back. We expect this to merge up to c2's level, since c2 is in use.
+          c4->set_free();
+          Metachunk* c5 = merge_chunk_with_checks(c4, c2->level(), &freelist);
+          ASSERT_NOT_NULL(c5);
+          freelist.add(c5);
+
+        }
+
+        // Merge c2 back.
+        c2->set_free();
+        merge_chunk_with_checks(c2, LOWEST_CHUNK_LEVEL, &freelist);
+
+        // After all this splitting and merging, the committed size should not have changed.
+        ASSERT_EQ(c2->committed_words(), committed_words_before);
+
+      }
+
+    }
+
+  } // end: test_split_and_merge_chunks
+
+
+
+
+};
+
+// Note: we unfortunately need TEST_VM even though the tested system is fairly
+// independent, because we need things like os::vm_page_size(), which in turn
+// require OS layer initialization.
+TEST_VM(metaspace, virtual_space_node_test_1) {
+  VirtualSpaceNodeTest test(metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+      metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+  test.test_simple();
+}
+
+TEST_VM(metaspace, virtual_space_node_test_2) {
+  // Should not hit commit limit
+  VirtualSpaceNodeTest test(3 * metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+      3 * metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+  test.test_simple();
+  test.test_exhaust_node();
+}
+
+TEST_VM(metaspace, virtual_space_node_test_3) {
+  // Should hit commit limit
+  VirtualSpaceNodeTest test(10 * metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+      3 * metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+  test.test_exhaust_node();
+}
+
+TEST_VM(metaspace, virtual_space_node_test_4) {
+  // Test committing and uncommitting arbitrary ranges
+  VirtualSpaceNodeTest test(metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+      metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+  test.test_arbitrary_commits();
+}
+
+TEST_VM(metaspace, virtual_space_node_test_5) {
+  // Test repeated splitting and merging of chunks
+  for (int run = 0; run < 100; run ++) {
+    VirtualSpaceNodeTest test(metaspace::chklvl::MAX_CHUNK_WORD_SIZE,
+        metaspace::chklvl::MAX_CHUNK_WORD_SIZE);
+    test.test_split_and_merge_chunks();
+  }
+}
+
+TEST_VM(metaspace, virtual_space_node_test_6) {
+  // Test large allocation and freeing.
+  {
+    VirtualSpaceNodeTest test(metaspace::chklvl::MAX_CHUNK_WORD_SIZE * 100,
+        metaspace::chklvl::MAX_CHUNK_WORD_SIZE * 100);
+    test.test_exhaust_node();
+  }
+  {
+    VirtualSpaceNodeTest test(metaspace::chklvl::MAX_CHUNK_WORD_SIZE * 100,
+        metaspace::chklvl::MAX_CHUNK_WORD_SIZE * 100);
+    test.test_exhaust_node();
+  }
+
+}
--- a/test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Wed Oct 16 16:54:56 2019 +0200
+++ b/test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Wed Oct 16 17:41:03 2019 +0200
@@ -106,7 +106,14 @@
       // Allocate past the MetaspaceSize limit.
       long metaspaceSize = Long.parseLong(args[0]);
       long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
-      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // There is a maximum to how much metaspace can be allocated in a single call.
+      final long max = 1024 * 1024 * 4;
+      while (allocationBeyondMetaspaceSize > 0) {
+        long s = Math.min(max, allocationBeyondMetaspaceSize);
+        wb.allocateMetaspace(null, s);
+        allocationBeyondMetaspaceSize -= s;
+      }
 
       // Wait for at least one GC to occur. The caller will parse the log files produced.
       GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
@@ -114,7 +121,6 @@
         Thread.sleep(100);
       }
 
-      wb.freeMetaspace(null, metaspace, metaspace);
     }
 
     private static GarbageCollectorMXBean getCMSGCBean() {
--- a/test/hotspot/jtreg/gc/class_unloading/TestG1ClassUnloadingHWM.java	Wed Oct 16 16:54:56 2019 +0200
+++ b/test/hotspot/jtreg/gc/class_unloading/TestG1ClassUnloadingHWM.java	Wed Oct 16 17:41:03 2019 +0200
@@ -104,12 +104,18 @@
       // Allocate past the MetaspaceSize limit
       long metaspaceSize = Long.parseLong(args[0]);
       long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
-      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // There is a maximum to how much metaspace can be allocated in a single call.
+      final long max = 1024 * 1024 * 4;
+      while (allocationBeyondMetaspaceSize > 0) {
+        long s = Math.min(max, allocationBeyondMetaspaceSize);
+        wb.allocateMetaspace(null, s);
+        allocationBeyondMetaspaceSize -= s;
+      }
 
       long youngGenSize = Long.parseLong(args[1]);
       triggerYoungGCs(youngGenSize);
 
-      wb.freeMetaspace(null, metaspace, metaspace);
     }
 
     public static void triggerYoungGCs(long youngGenSize) {