8072061: Automatically determine optimal sizes for the CDS regions
author iklam
Wed, 02 Aug 2017 18:06:38 -0700
changeset 46746 ea379ebb9447
parent 46745 f7b9bb98bb72
child 46747 7b6570052b58
8072061: Automatically determine optimal sizes for the CDS regions
Summary: See new C++ class MetaspaceClosure.
Reviewed-by: coleenp, jiangli, mseledtsov
hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp
hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
hotspot/src/os_cpu/linux_arm/vm/thread_linux_arm.cpp
hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp
hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp
hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp
hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp
hotspot/src/share/vm/classfile/classLoader.hpp
hotspot/src/share/vm/classfile/classLoaderData.cpp
hotspot/src/share/vm/classfile/classLoaderData.hpp
hotspot/src/share/vm/classfile/compactHashtable.cpp
hotspot/src/share/vm/classfile/dictionary.cpp
hotspot/src/share/vm/classfile/dictionary.hpp
hotspot/src/share/vm/classfile/klassFactory.cpp
hotspot/src/share/vm/classfile/sharedClassUtil.hpp
hotspot/src/share/vm/classfile/stringTable.cpp
hotspot/src/share/vm/classfile/stringTable.hpp
hotspot/src/share/vm/classfile/symbolTable.cpp
hotspot/src/share/vm/classfile/symbolTable.hpp
hotspot/src/share/vm/classfile/systemDictionary.cpp
hotspot/src/share/vm/classfile/systemDictionary.hpp
hotspot/src/share/vm/classfile/systemDictionaryShared.hpp
hotspot/src/share/vm/classfile/vmSymbols.cpp
hotspot/src/share/vm/classfile/vmSymbols.hpp
hotspot/src/share/vm/memory/allocation.cpp
hotspot/src/share/vm/memory/allocation.hpp
hotspot/src/share/vm/memory/filemap.cpp
hotspot/src/share/vm/memory/filemap.hpp
hotspot/src/share/vm/memory/metadataFactory.hpp
hotspot/src/share/vm/memory/metaspace.cpp
hotspot/src/share/vm/memory/metaspace.hpp
hotspot/src/share/vm/memory/metaspaceClosure.cpp
hotspot/src/share/vm/memory/metaspaceClosure.hpp
hotspot/src/share/vm/memory/metaspaceShared.cpp
hotspot/src/share/vm/memory/metaspaceShared.hpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/memory/virtualspace.hpp
hotspot/src/share/vm/oops/annotations.cpp
hotspot/src/share/vm/oops/annotations.hpp
hotspot/src/share/vm/oops/array.hpp
hotspot/src/share/vm/oops/arrayKlass.cpp
hotspot/src/share/vm/oops/arrayKlass.hpp
hotspot/src/share/vm/oops/constMethod.cpp
hotspot/src/share/vm/oops/constMethod.hpp
hotspot/src/share/vm/oops/constantPool.cpp
hotspot/src/share/vm/oops/constantPool.hpp
hotspot/src/share/vm/oops/cpCache.cpp
hotspot/src/share/vm/oops/cpCache.hpp
hotspot/src/share/vm/oops/instanceKlass.cpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/oops/klass.cpp
hotspot/src/share/vm/oops/klass.hpp
hotspot/src/share/vm/oops/klassVtable.hpp
hotspot/src/share/vm/oops/metadata.hpp
hotspot/src/share/vm/oops/method.cpp
hotspot/src/share/vm/oops/method.hpp
hotspot/src/share/vm/oops/methodCounters.cpp
hotspot/src/share/vm/oops/methodCounters.hpp
hotspot/src/share/vm/oops/methodData.cpp
hotspot/src/share/vm/oops/methodData.hpp
hotspot/src/share/vm/oops/objArrayKlass.cpp
hotspot/src/share/vm/oops/objArrayKlass.hpp
hotspot/src/share/vm/oops/symbol.cpp
hotspot/src/share/vm/oops/symbol.hpp
hotspot/src/share/vm/prims/whitebox.cpp
hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp
hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/utilities/debug.cpp
hotspot/src/share/vm/utilities/debug.hpp
hotspot/src/share/vm/utilities/hashtable.cpp
hotspot/src/share/vm/utilities/hashtable.hpp
hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java
hotspot/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java
hotspot/test/runtime/SharedArchiveFile/LargeSharedSpace.java
hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java
hotspot/test/runtime/SharedArchiveFile/MaxMetaspaceSize.java
hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java
hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java
--- a/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,13 +65,11 @@
       return false;
     }
 
-#if INCLUDE_CDS
-    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
       // In the middle of a trampoline call. Bail out for safety.
       // This happens rarely so shouldn't affect profiling.
       return false;
     }
-#endif
 
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
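The same guarded region check is removed from each of the eight os_cpu thread_*.cpp files in this changeset and replaced by a single call into MetaspaceShared. Below is a minimal standalone sketch of that centralization; the stub body and the md region constant mirror the removed check, but the actual metaspaceShared.cpp implementation may differ.

    // Sketch only: the removed #if INCLUDE_CDS block now lives behind one
    // MetaspaceShared entry point shared by all platforms.
    #include <cstdio>

    typedef unsigned char* address;

    static bool UseSharedSpaces = true;   // stand-in for the VM flag

    struct MetaspaceShared {
      enum { md = 0 };  // illustrative index of the misc-data region

      static bool is_in_shared_region(address addr, int idx) {
        (void)addr; (void)idx;
        return false;   // stub; the VM checks the mapped region's bounds
      }

      // One call site replaces the block that previously appeared verbatim
      // in every thread_<os>_<cpu>.cpp file.
      static bool is_in_trampoline_frame(address pc) {
    #if INCLUDE_CDS   // undefined in this sketch, so it compiles either way
        return UseSharedSpaces && is_in_shared_region(pc, md);
    #else
        return false;
    #endif
      }
    };

    int main() {
      std::printf("in trampoline: %d\n",
                  MetaspaceShared::is_in_trampoline_frame(nullptr));
    }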
--- a/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -67,13 +67,11 @@
       return false;
     }
 
-#if INCLUDE_CDS
-    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
       // In the middle of a trampoline call. Bail out for safety.
       // This happens rarely so shouldn't affect profiling.
       return false;
     }
-#endif
 
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
--- a/hotspot/src/os_cpu/linux_arm/vm/thread_linux_arm.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/linux_arm/vm/thread_linux_arm.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,13 +94,11 @@
       return false;
     }
 
-#if INCLUDE_CDS
-    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
       // In the middle of a trampoline call. Bail out for safety.
       // This happens rarely so shouldn't affect profiling.
       return false;
     }
-#endif
 
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
--- a/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,13 +65,11 @@
     return false;
   }
 
-#if INCLUDE_CDS
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+  if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
     // In the middle of a trampoline call. Bail out for safety.
     // This happens rarely so shouldn't affect profiling.
     return false;
   }
-#endif
 
   // we were running Java code when SIGPROF came in
   if (isInJava) {
--- a/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,13 +66,11 @@
       return false;
     }
 
-#if INCLUDE_CDS
-    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
       // In the middle of a trampoline call. Bail out for safety.
       // This happens rarely so shouldn't affect profiling.
       return false;
     }
-#endif
 
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
--- a/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,13 +78,11 @@
     return false;
   }
 
-#if INCLUDE_CDS
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+  if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
     // In the middle of a trampoline call. Bail out for safety.
     // This happens rarely so shouldn't affect profiling.
     return false;
   }
-#endif
 
   frame ret_frame(ret_sp, frame::unpatchable, addr.pc());
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,13 +70,11 @@
     return false;
   }
 
-#if INCLUDE_CDS
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+  if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
     // In the middle of a trampoline call. Bail out for safety.
     // This happens rarely so shouldn't affect profiling.
     return false;
   }
-#endif
 
   // If sp and fp are nonsense just leave them out
 
--- a/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,13 +73,11 @@
       return false;
     }
 
-#if INCLUDE_CDS
-    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
       // In the middle of a trampoline call. Bail out for safety.
       // This happens rarely so shouldn't affect profiling.
       return false;
     }
-#endif
 
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -397,6 +397,7 @@
   static int compute_Object_vtable();
 
   static ClassPathEntry* classpath_entry(int n) {
+    assert(n >= 0 && n < _num_entries, "sanity");
     if (n == 0) {
       assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
       return ClassLoader::_jrt_entry;
@@ -415,6 +416,10 @@
     }
   }
 
+  static int number_of_classpath_entries() {
+    return _num_entries;
+  }
+
   static bool is_in_patch_mod_entries(Symbol* module_name);
 
 #if INCLUDE_CDS
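classLoader.hpp gains a bounds-checked classpath_entry() and a number_of_classpath_entries() accessor, which lets the rewritten allocate_classpath_entry_table() in filemap.cpp size its table up front and walk the entries with a plain bounded loop. A simplified, self-contained sketch of that pattern follows; the linked-list storage and field names here are assumptions.

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for ClassLoader's classpath bookkeeping.
    struct ClassPathEntry {
      const char*     _name;
      ClassPathEntry* _next;
    };

    static ClassPathEntry* _first_entry = nullptr;
    static int             _num_entries = 0;

    ClassPathEntry* classpath_entry(int n) {
      assert(n >= 0 && n < _num_entries && "sanity");  // new bounds check
      ClassPathEntry* e = _first_entry;
      while (n-- > 0) e = e->_next;
      return e;
    }

    int number_of_classpath_entries() { return _num_entries; }

    int main() {
      ClassPathEntry jar{"app.jar", nullptr}, jrt{"modules", &jar};
      _first_entry = &jrt;
      _num_entries = 2;
      // The accessor lets dump-time code size tables up front and iterate
      // with a bounded loop instead of chasing the list blindly.
      for (int i = 0; i < number_of_classpath_entries(); i++) {
        std::printf("entry %d: %s\n", i, classpath_entry(i)->_name);
      }
    }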
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -726,7 +726,6 @@
 }
 
 Metaspace* ClassLoaderData::metaspace_non_null() {
-  assert(!DumpSharedSpaces, "wrong metaspace!");
   // If the metaspace has not been allocated, create a new one.  Might want
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
@@ -1315,37 +1314,6 @@
 #endif
 }
 
-// CDS support
-
-// Global metaspaces for writing information to the shared archive.  When
-// application CDS is supported, we may need one per metaspace, so this
-// sort of looks like it.
-Metaspace* ClassLoaderData::_ro_metaspace = NULL;
-Metaspace* ClassLoaderData::_rw_metaspace = NULL;
-static bool _shared_metaspaces_initialized = false;
-
-// Initialize shared metaspaces (change to call from somewhere not lazily)
-void ClassLoaderData::initialize_shared_metaspaces() {
-  assert(DumpSharedSpaces, "only use this for dumping shared spaces");
-  assert(this == ClassLoaderData::the_null_class_loader_data(),
-         "only supported for null loader data for now");
-  assert (!_shared_metaspaces_initialized, "only initialize once");
-  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
-  _ro_metaspace = new Metaspace(_metaspace_lock, Metaspace::ROMetaspaceType);
-  _rw_metaspace = new Metaspace(_metaspace_lock, Metaspace::ReadWriteMetaspaceType);
-  _shared_metaspaces_initialized = true;
-}
-
-Metaspace* ClassLoaderData::ro_metaspace() {
-  assert(_ro_metaspace != NULL, "should already be initialized");
-  return _ro_metaspace;
-}
-
-Metaspace* ClassLoaderData::rw_metaspace() {
-  assert(_rw_metaspace != NULL, "should already be initialized");
-  return _rw_metaspace;
-}
-
 ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
     : _next_klass(NULL) {
   ClassLoaderData* cld = ClassLoaderDataGraph::_head;
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -314,9 +314,6 @@
     _the_null_class_loader_data = new ClassLoaderData(Handle(), false, Dependencies());
     ClassLoaderDataGraph::_head = _the_null_class_loader_data;
     assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
-    if (DumpSharedSpaces) {
-      _the_null_class_loader_data->initialize_shared_metaspaces();
-    }
   }
 
   bool is_the_null_class_loader_data() const {
@@ -387,11 +384,6 @@
   static ClassLoaderData* anonymous_class_loader_data(oop loader, TRAPS);
   static void print_loader(ClassLoaderData *loader_data, outputStream *out);
 
-  // CDS support
-  Metaspace* ro_metaspace();
-  Metaspace* rw_metaspace();
-  void initialize_shared_metaspaces();
-
   TRACE_DEFINE_TRACE_ID_METHODS;
 };
 
--- a/hotspot/src/share/vm/classfile/compactHashtable.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -48,8 +48,6 @@
     _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
   }
 
-  stats->bucket_count = _num_buckets;
-  stats->bucket_bytes = (_num_buckets + 1) * (sizeof(u4));
   _stats = stats;
   _compact_buckets = NULL;
   _compact_entries = NULL;
@@ -91,13 +89,13 @@
                                   "Too many entries.");
   }
 
-  Thread* THREAD = VMThread::vm_thread();
-  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-  _compact_buckets = MetadataFactory::new_array<u4>(loader_data, _num_buckets + 1, THREAD);
-  _compact_entries = MetadataFactory::new_array<u4>(loader_data, entries_space, THREAD);
+  _compact_buckets = MetaspaceShared::new_ro_array<u4>(_num_buckets + 1);
+  _compact_entries = MetaspaceShared::new_ro_array<u4>(entries_space);
 
+  _stats->bucket_count    = _num_buckets;
+  _stats->bucket_bytes    = _compact_buckets->size() * BytesPerWord;
   _stats->hashentry_count = _num_entries;
-  _stats->hashentry_bytes = entries_space * sizeof(u4);
+  _stats->hashentry_bytes = _compact_entries->size() * BytesPerWord;
 }
 
 // Write the compact table's buckets
@@ -177,12 +175,11 @@
 // Customization for dumping Symbol and String tables
 
 void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
-  address base_address = address(MetaspaceShared::shared_rs()->base());
-
-  uintx deltax = address(symbol) - base_address;
-  // The symbols are in RO space, which is smaler than MAX_SHARED_DELTA.
-  // The assert below is just to be extra cautious.
-  assert(deltax <= MAX_SHARED_DELTA, "the delta is too large to encode");
+  uintx deltax = MetaspaceShared::object_delta(symbol);
+  // When the symbols are stored into the archive, we already check that
+  // they won't be more than MAX_SHARED_DELTA from the base address, or
+  // else the dumping would have been aborted.
+  assert(deltax <= MAX_SHARED_DELTA, "must not be");
   u4 delta = u4(deltax);
 
   CompactHashtableWriter::add(hash, delta);
@@ -243,7 +240,6 @@
 
 template <class I>
 inline void SimpleCompactHashtable::iterate(const I& iterator) {
-  assert(!DumpSharedSpaces, "run-time only");
   for (u4 i = 0; i < _bucket_count; i++) {
     u4 bucket_info = _buckets[i];
     u4 bucket_offset = BUCKET_OFFSET(bucket_info);
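CompactSymbolTableWriter::add() now delegates the pointer-to-offset conversion to MetaspaceShared::object_delta(), which encodes a symbol's position as a 32-bit offset from the shared base address. A standalone sketch of that encoding, with an illustrative MAX_SHARED_DELTA value (the real constant is defined elsewhere in the VM):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uintx;
    typedef uint32_t  u4;

    const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;  // illustrative bound

    static char* shared_base;  // base of the shared archive mapping

    // Encode an archived object's address as a compact 32-bit delta.
    u4 object_delta(void* obj) {
      uintx deltax = (uintx)((char*)obj - shared_base);
      assert(deltax <= MAX_SHARED_DELTA && "delta too large to encode");
      return (u4)deltax;
    }

    int main() {
      char arena[1024];
      shared_base = arena;
      void* symbol = arena + 128;  // pretend a Symbol lives here
      std::printf("encoded delta = %u\n", object_delta(symbol));
    }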
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -32,6 +32,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -241,6 +242,20 @@
   }
 }
 
+// Used to scan and relocate the classes during CDS archive dump.
+void Dictionary::classes_do(MetaspaceClosure* it) {
+  assert(DumpSharedSpaces, "dump-time only");
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry* probe = bucket(index);
+                          probe != NULL;
+                          probe = probe->next()) {
+      it->push(probe->klass_addr());
+      ((SharedDictionaryEntry*)probe)->metaspace_pointers_do(it);
+    }
+  }
+}
+
+
 
 // Add a loaded class to the dictionary.
 // Readers of the SystemDictionary aren't always locked, so _buckets
@@ -342,7 +357,7 @@
 }
 
 
-void Dictionary::reorder_dictionary() {
+void Dictionary::reorder_dictionary_for_sharing() {
 
   // Copy all the dictionary entries into a single master list.
 
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -84,6 +84,7 @@
   void classes_do(void f(InstanceKlass*));
   void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
   void all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
+  void classes_do(MetaspaceClosure* it);
 
   void unlink(BoolObjectClosure* is_alive);
   void remove_classes_in_error_state();
@@ -101,7 +102,7 @@
                              Handle protection_domain, TRAPS);
 
   // Sharing support
-  void reorder_dictionary();
+  void reorder_dictionary_for_sharing();
 
   void print_on(outputStream* st) const;
   void verify();
@@ -142,6 +143,7 @@
   void add_protection_domain(Dictionary* dict, Handle protection_domain);
 
   InstanceKlass* instance_klass() const { return literal(); }
+  InstanceKlass** klass_addr() { return (InstanceKlass**)literal_addr(); }
 
   DictionaryEntry* next() const {
     return (DictionaryEntry*)HashtableEntry<InstanceKlass*, mtClass>::next();
@@ -300,9 +302,6 @@
 
   void methods_do(void f(Method*));
 
-  // Sharing support
-  void reorder_dictionary();
-
   void verify();
 };
 #endif // SHARE_VM_CLASSFILE_DICTIONARY_HPP
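Dictionary::classes_do(MetaspaceClosure*) and the new klass_addr() accessor exist so the dumper can collect the address of every Klass pointer for later relocation into the archive. A self-contained miniature of that push-the-pointer-location pattern follows; the real MetaspaceClosure in memory/metaspaceClosure.hpp is considerably richer (reference kinds, read-only tagging).

    #include <cstdio>
    #include <vector>

    struct Klass { const char* name; };

    // Toy closure: remembers every slot that holds a metaspace pointer.
    struct MetaspaceClosure {
      std::vector<void**> locations;
      void push(Klass** loc) { locations.push_back(reinterpret_cast<void**>(loc)); }
    };

    struct DictionaryEntry {
      Klass*           _klass;
      DictionaryEntry* _next;
      Klass** klass_addr() { return &_klass; }          // address of the slot
      DictionaryEntry* next() { return _next; }
    };

    // Analogous to Dictionary::classes_do(MetaspaceClosure*): hand the
    // closure the *address of* each Klass pointer so it can be relocated.
    void classes_do(DictionaryEntry* bucket, MetaspaceClosure* it) {
      for (DictionaryEntry* p = bucket; p != nullptr; p = p->next()) {
        it->push(p->klass_addr());
      }
    }

    int main() {
      Klass k1{"java/lang/Object"}, k2{"java/lang/String"};
      DictionaryEntry e2{&k2, nullptr}, e1{&k1, &e2};
      MetaspaceClosure it;
      classes_do(&e1, &it);
      std::printf("collected %zu pointer locations\n", it.locations.size());
    }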
--- a/hotspot/src/share/vm/classfile/klassFactory.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/klassFactory.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -74,7 +74,7 @@
         (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
       ClassFileStream* stream = new ClassFileStream(ptr,
                                                     end_ptr - ptr,
-                                                    ent == NULL ? NULL : ent->_name,
+                                                    ent == NULL ? NULL : ent->name(),
                                                     ClassFileStream::verify);
       ClassFileParser parser(stream,
                              class_name,
@@ -229,8 +229,7 @@
       len = stream->length();
       bytes = stream->buffer();
     }
-    p = (JvmtiCachedClassFileData*)MetaspaceShared::optional_data_space_alloc(
-                    offset_of(JvmtiCachedClassFileData, data) + len);
+    p = (JvmtiCachedClassFileData*)os::malloc(offset_of(JvmtiCachedClassFileData, data) + len, mtInternal);
     p->length = len;
     memcpy(p->data, bytes, len);
     result->set_archived_class_data(p);
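The JVMTI cached class file data is now allocated with os::malloc() using the offset_of() trailing-array idiom, rather than carved out of the optional data space. A plain C++ model of that allocation pattern follows; the struct and function names are stand-ins, mirroring JvmtiCachedClassFileData's header-plus-payload layout.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct CachedClassFileData {
      int           length;
      unsigned char data[1];   // variable-length tail, as in the VM struct
    };

    CachedClassFileData* cache_bytes(const unsigned char* bytes, int len) {
      // One allocation covers the fixed header plus `len` payload bytes.
      size_t sz = offsetof(CachedClassFileData, data) + (size_t)len;
      CachedClassFileData* p = (CachedClassFileData*)std::malloc(sz);
      if (p == nullptr) return nullptr;
      p->length = len;
      std::memcpy(p->data, bytes, (size_t)len);
      return p;
    }

    int main() {
      unsigned char classfile[] = {0xCA, 0xFE, 0xBA, 0xBE};
      CachedClassFileData* p = cache_bytes(classfile, sizeof classfile);
      std::printf("cached %d bytes, first 0x%02X\n", p->length, p->data[0]);
      std::free(p);
    }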
--- a/hotspot/src/share/vm/classfile/sharedClassUtil.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,13 +52,7 @@
   }
 
   static void update_shared_classpath(ClassPathEntry *cpe,
-                                      SharedClassPathEntry* ent,
-                                      time_t timestamp,
-                                      long filesize, TRAPS) {
-    ent->_timestamp = timestamp;
-    ent->_filesize  = filesize;
-  }
-
+                                      SharedClassPathEntry* ent, TRAPS) {}
   static void initialize(TRAPS) {}
 
   inline static bool is_shared_boot_class(Klass* klass) {
--- a/hotspot/src/share/vm/classfile/stringTable.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -33,6 +33,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -731,6 +732,9 @@
 
       // add to the compact table
       writer->add(hash, new_s);
+
+      MetaspaceShared::relocate_klass_ptr(new_s);
+      MetaspaceShared::relocate_klass_ptr(new_v);
     }
   }
 
@@ -740,35 +744,33 @@
   return true;
 }
 
-void StringTable::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
-                            size_t* space_size) {
-#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
+void StringTable::write_to_archive(GrowableArray<MemRegion> *string_space) {
+#if INCLUDE_CDS
   _shared_table.reset();
-  if (soc->writing()) {
-    if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
+  if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
       log_info(cds)(
-          "Shared strings are excluded from the archive as UseG1GC, "
-          "UseCompressedOops and UseCompressedClassPointers are required."
-          "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
-          BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
-          BOOL_TO_STR(UseCompressedClassPointers));
-    } else {
-      int num_buckets = the_table()->number_of_entries() /
-                             SharedSymbolTableBucketSize;
-      // calculation of num_buckets can result in zero buckets, we need at least one
-      CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
-                                      &MetaspaceShared::stats()->string);
+        "Shared strings are excluded from the archive as UseG1GC, "
+        "UseCompressedOops and UseCompressedClassPointers are required."
+        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
+        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
+        BOOL_TO_STR(UseCompressedClassPointers));
+  } else {
+    int num_buckets = the_table()->number_of_entries() /
+                           SharedSymbolTableBucketSize;
+    // calculation of num_buckets can result in zero buckets, we need at least one
+    CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
+                                    &MetaspaceShared::stats()->string);
 
-      // Copy the interned strings into the "string space" within the java heap
-      if (copy_shared_string(string_space, &writer)) {
-        for (int i = 0; i < string_space->length(); i++) {
-          *space_size += string_space->at(i).byte_size();
-        }
-        writer.dump(&_shared_table);
-      }
+    // Copy the interned strings into the "string space" within the java heap
+    if (copy_shared_string(string_space, &writer)) {
+      writer.dump(&_shared_table);
     }
   }
+#endif
+}
 
+void StringTable::serialize(SerializeClosure* soc) {
+#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
   _shared_table.set_type(CompactHashtable<oop, char>::_string_table);
   _shared_table.serialize(soc);
 
--- a/hotspot/src/share/vm/classfile/stringTable.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/stringTable.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -162,8 +162,8 @@
   static void shared_oops_do(OopClosure* f);
   static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
                                  CompactStringTableWriter* ch_table);
-  static void serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
-                        size_t* space_size);
+  static void write_to_archive(GrowableArray<MemRegion> *string_space);
+  static void serialize(SerializeClosure* soc);
 
   // Rehash the symbol table if it gets out of balance
   static void rehash_table();
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -32,6 +32,7 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -57,9 +58,9 @@
   Symbol* sym;
 
   if (DumpSharedSpaces) {
-    // Allocate all symbols to CLD shared metaspace
-    sym = new (len, ClassLoaderData::the_null_class_loader_data(), THREAD) Symbol(name, len, PERM_REFCOUNT);
-  } else if (c_heap) {
+    c_heap = false;
+  }
+  if (c_heap) {
     // refcount starts as 1
     sym = new (len, THREAD) Symbol(name, len, 1);
     assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
@@ -95,6 +96,18 @@
   }
 }
 
+void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
+  assert(DumpSharedSpaces, "called only during dump time");
+  const int n = the_table()->table_size();
+  for (int i = 0; i < n; i++) {
+    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
+         p != NULL;
+         p = p->next()) {
+      it->push(p->literal_addr());
+    }
+  }
+}
+
 int SymbolTable::_symbols_removed = 0;
 int SymbolTable::_symbols_counted = 0;
 volatile int SymbolTable::_parallel_claimed_idx = 0;
@@ -568,10 +581,10 @@
   }
 }
 
-void SymbolTable::serialize(SerializeClosure* soc) {
+void SymbolTable::write_to_archive() {
 #if INCLUDE_CDS
-  _shared_table.reset();
-  if (soc->writing()) {
+    _shared_table.reset();
+
     int num_buckets = the_table()->number_of_entries() /
                             SharedSymbolTableBucketSize;
     CompactSymbolTableWriter writer(num_buckets,
@@ -587,19 +600,22 @@
     }
 
     writer.dump(&_shared_table);
-  }
 
-  _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
-  _shared_table.serialize(soc);
-
-  if (soc->writing()) {
     // Verify table is correct
     Symbol* sym = vmSymbols::java_lang_Object();
     const char* name = (const char*)sym->bytes();
     int len = sym->utf8_length();
     unsigned int hash = hash_symbol(name, len);
     assert(sym == _shared_table.lookup(name, hash, len), "sanity");
+#endif
+}
 
+void SymbolTable::serialize(SerializeClosure* soc) {
+#if INCLUDE_CDS
+  _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
+  _shared_table.serialize(soc);
+
+  if (soc->writing()) {
     // Sanity. Make sure we don't use the shared table at dump time
     _shared_table.reset();
   }
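serialize() is split in two here: write_to_archive() builds the compact table at dump time only, while serialize() merely records or restores the table against the archive. A simplified model of that split, with stand-in types (the real SerializeClosure and table layout differ):

    #include <cstdio>
    #include <cstring>

    struct SerializeClosure {
      bool           writing;
      unsigned char* archive;  // pretend archive backing store
      size_t         pos = 0;
      void do_region(unsigned char* p, size_t n) {
        if (writing) std::memcpy(archive + pos, p, n);   // dump
        else         std::memcpy(p, archive + pos, n);   // restore
        pos += n;
      }
    };

    struct SharedTable { int bucket_count; };
    static SharedTable _shared_table;

    void write_to_archive(int num_entries) {   // dump time only
      _shared_table.bucket_count = num_entries / 4 + 1;
    }

    void serialize(SerializeClosure* soc) {    // dump and run time
      soc->do_region((unsigned char*)&_shared_table, sizeof(_shared_table));
      if (soc->writing) _shared_table = SharedTable{};  // sanity reset, as above
    }

    int main() {
      unsigned char archive[64] = {};
      write_to_archive(100);
      SerializeClosure w{true, archive};  serialize(&w);   // dump
      SerializeClosure r{false, archive}; serialize(&r);   // restore
      std::printf("restored bucket_count = %d\n", _shared_table.bucket_count);
    }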
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -213,6 +213,7 @@
 
   // iterate over symbols
   static void symbols_do(SymbolClosure *cl);
+  static void metaspace_pointers_do(MetaspaceClosure* it);
 
   // Symbol creation
   static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
@@ -255,6 +256,7 @@
   static void read(const char* filename, TRAPS);
 
   // Sharing
+  static void write_to_archive();
   static void serialize(SerializeClosure* soc);
   static u4 encode_shared(Symbol* sym);
   static Symbol* decode_shared(u4 offset);
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -49,6 +49,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/filemap.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
@@ -1952,6 +1953,18 @@
   ResolvedMethodTable::oops_do(f);
 }
 
+// CDS: scan and relocate all classes in the system dictionary.
+void SystemDictionary::classes_do(MetaspaceClosure* it) {
+  ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(it);
+}
+
+// CDS: scan and relocate all classes referenced by _well_known_klasses[].
+void SystemDictionary::well_known_klasses_do(MetaspaceClosure* it) {
+  for (int id = FIRST_WKID; id < WKID_LIMIT; id++) {
+    it->push(well_known_klass_addr((WKID)id));
+  }
+}
+
 void SystemDictionary::methods_do(void f(Method*)) {
   // Walk methods in loaded classes
   ClassLoaderDataGraph::methods_do(f);
@@ -2793,18 +2806,23 @@
   return _pd_cache_table->get(protection_domain);
 }
 
-
-void SystemDictionary::reorder_dictionary() {
-  ClassLoaderData::the_null_class_loader_data()->dictionary()->reorder_dictionary();
+void SystemDictionary::reorder_dictionary_for_sharing() {
+  ClassLoaderData::the_null_class_loader_data()->dictionary()->reorder_dictionary_for_sharing();
+}
+
+size_t SystemDictionary::count_bytes_for_buckets() {
+  return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_buckets();
 }
 
-
-void SystemDictionary::copy_buckets(char** top, char* end) {
+size_t SystemDictionary::count_bytes_for_table() {
+  return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_table();
+}
+
+void SystemDictionary::copy_buckets(char* top, char* end) {
   ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_buckets(top, end);
 }
 
-
-void SystemDictionary::copy_table(char** top, char* end) {
+void SystemDictionary::copy_table(char* top, char* end) {
   ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_table(top, end);
 }
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -353,7 +353,9 @@
                                                            Handle class_loader,
                                                            TRAPS);
 
+  static void classes_do(MetaspaceClosure* it);
   // Iterate over all methods in all klasses
+
   static void methods_do(void f(Method*));
 
   // Garbage collection support
@@ -382,9 +384,11 @@
 
 public:
   // Sharing support.
-  static void reorder_dictionary();
-  static void copy_buckets(char** top, char* end);
-  static void copy_table(char** top, char* end);
+  static void reorder_dictionary_for_sharing();
+  static size_t count_bytes_for_buckets();
+  static size_t count_bytes_for_table();
+  static void copy_buckets(char* top, char* end);
+  static void copy_table(char* top, char* end);
   static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                     int number_of_entries);
   // Printing
@@ -442,6 +446,7 @@
     assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
     return &_well_known_klasses[id];
   }
+  static void well_known_klasses_do(MetaspaceClosure* it);
 
   // Local definition for direct access to the private array:
   #define WK_KLASS(name) _well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)]
--- a/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -91,4 +91,9 @@
                                               TRAPS) {}
 };
 
+class SharedDictionaryEntry : public DictionaryEntry {
+public:
+  void metaspace_pointers_do(MetaspaceClosure* it) {}
+};
+
 #endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -26,6 +26,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compilerDirectives.hpp"
 #include "memory/oopFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvm.h"
 #include "runtime/handles.inline.hpp"
@@ -183,6 +184,15 @@
   }
 }
 
+void vmSymbols::metaspace_pointers_do(MetaspaceClosure *it) {
+  for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+    it->push(&_symbols[index]);
+  }
+  for (int i = 0; i < T_VOID+1; i++) {
+    it->push(&_type_signatures[i]);
+  }
+}
+
 void vmSymbols::serialize(SerializeClosure* soc) {
   soc->do_region((u_char*)&_symbols[FIRST_SID],
                  (SID_LIMIT - FIRST_SID) * sizeof(_symbols[0]));
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1511,6 +1511,7 @@
 
   // Sharing support
   static void symbols_do(SymbolClosure* f);
+  static void metaspace_pointers_do(MetaspaceClosure *it);
   static void serialize(SerializeClosure* soc);
 
   static Symbol* type_signature(BasicType t) {
--- a/hotspot/src/share/vm/memory/allocation.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,10 +47,10 @@
 void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
 
 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
-                                 size_t word_size, bool read_only,
+                                 size_t word_size,
                                  MetaspaceObj::Type type, TRAPS) throw() {
   // Klass has it's own operator new
-  return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
+  return Metaspace::allocate(loader_data, word_size, type, THREAD);
 }
 
 bool MetaspaceObj::is_shared() const {
--- a/hotspot/src/share/vm/memory/allocation.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -239,6 +239,7 @@
 //
 
 class ClassLoaderData;
+class MetaspaceClosure;
 
 class MetaspaceObj {
  public:
@@ -260,9 +261,8 @@
   f(MethodData) \
   f(ConstantPool) \
   f(ConstantPoolCache) \
-  f(Annotation) \
-  f(MethodCounters) \
-  f(Deallocated)
+  f(Annotations) \
+  f(MethodCounters)
 
 #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
 #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
@@ -294,10 +294,15 @@
   }
 
   void* operator new(size_t size, ClassLoaderData* loader_data,
-                     size_t word_size, bool read_only,
+                     size_t word_size,
                      Type type, Thread* thread) throw();
                      // can't use TRAPS from this header file.
   void operator delete(void* p) { ShouldNotCallThis(); }
+
+  // Declare a *static* method with the same signature in any subclass of MetaspaceObj
+  // that should be read-only by default. See symbol.hpp for an example. This function
+  // is used by the templates in metaspaceClosure.hpp
+  static bool is_read_only_by_default() { return false; }
 };
 
 // Base class for classes that constitute name spaces.
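is_read_only_by_default() is a static method deliberately shadowed, not virtually overridden, in subclasses; the templates in metaspaceClosure.hpp resolve it at compile time to decide whether an object type defaults to the read-only region. A sketch of how that static resolution works, using placeholder classes rather than the real HotSpot types:

    #include <cstdio>

    struct MetaspaceObj {
      static bool is_read_only_by_default() { return false; }
    };

    struct Symbol : MetaspaceObj {
      // Shadows the base version; symbols can live in the ro region.
      static bool is_read_only_by_default() { return true; }
    };

    struct Method : MetaspaceObj {};   // inherits the default (false)

    template <class T>
    void plan_region(const char* what) {
      // T::is_read_only_by_default() binds statically to the most derived
      // declaration -- the trick the allocation.hpp comment relies on.
      std::printf("%s -> %s region\n", what,
                  T::is_read_only_by_default() ? "ro" : "rw");
    }

    int main() {
      plan_region<Symbol>("Symbol");   // ro
      plan_region<Method>("Method");   // rw
    }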
--- a/hotspot/src/share/vm/memory/filemap.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -38,6 +38,8 @@
 #include "logging/logMessage.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/objArrayOop.hpp"
 #include "prims/jvm.h"
@@ -196,114 +198,97 @@
   get_header_version(_jvm_ident);
 }
 
-void FileMapInfo::allocate_classpath_entry_table() {
-  int bytes = 0;
-  int count = 0;
-  char* strptr = NULL;
-  char* strptr_max = NULL;
-  Thread* THREAD = Thread::current();
-
-  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-  size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
-
-  for (int pass=0; pass<2; pass++) {
+void SharedClassPathEntry::init(const char* name, TRAPS) {
+  _timestamp = 0;
+  _filesize  = 0;
 
-    // Process the modular java runtime image first
-    ClassPathEntry* jrt_entry = ClassLoader::get_jrt_entry();
-    assert(jrt_entry != NULL,
-           "No modular java runtime image present when allocating the CDS classpath entry table");
-    const char *name = jrt_entry->name();
-    int name_bytes = (int)(strlen(name) + 1);
-    if (pass == 0) {
-      count++;
-      bytes += (int)entry_size;
-      bytes += name_bytes;
-      log_info(class, path)("add main shared path for modular java runtime image %s", name);
+  struct stat st;
+  if (os::stat(name, &st) == 0) {
+    if ((st.st_mode & S_IFMT) == S_IFDIR) {
+      if (!os::dir_is_empty(name)) {
+        ClassLoader::exit_with_path_failure(
+                  "Cannot have non-empty directory in archived classpaths", name);
+      }
+      _is_dir = true;
     } else {
-      // The java runtime image is always in slot 0 on the shared class path.
-      SharedClassPathEntry* ent = shared_classpath(0);
-      struct stat st;
-      if (os::stat(name, &st) == 0) {
-        ent->_timestamp = st.st_mtime;
-        ent->_filesize = st.st_size;
-      }
-      if (ent->_filesize == 0) {
-        // unknown
-        ent->_filesize = -2;
-      }
-      ent->_name = strptr;
-      assert(strptr + name_bytes <= strptr_max, "miscalculated buffer size");
-      strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
-      strptr += name_bytes;
+      _is_dir = false;
+      _timestamp = st.st_mtime;
+      _filesize = st.st_size;
     }
-
-    // Walk the appended entries, which includes the entries added for the classpath.
-    ClassPathEntry *cpe = ClassLoader::classpath_entry(1);
+  } else {
+    // The file/dir must exist, or it would not have been added
+    // into ClassLoader::classpath_entry().
+    //
+    // If we can't access a jar file in the boot path, then we can't
+    // make assumptions about where classes get loaded from.
+    FileMapInfo::fail_stop("Unable to open file %s.", name);
+  }
 
-    // Since the java runtime image is always in slot 0 on the shared class path, the
-    // appended entries are started at slot 1 immediately after.
-    for (int cur_entry = 1 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
-      const char *name = cpe->name();
-      int name_bytes = (int)(strlen(name) + 1);
-      assert(!cpe->is_jrt(), "A modular java runtime image is present on the list of appended entries");
-
-      if (pass == 0) {
-        count ++;
-        bytes += (int)entry_size;
-        bytes += name_bytes;
-        log_info(class, path)("add main shared path (%s) %s", (cpe->is_jar_file() ? "jar" : "dir"), name);
-      } else {
-        SharedClassPathEntry* ent = shared_classpath(cur_entry);
-        if (cpe->is_jar_file()) {
-          struct stat st;
-          if (os::stat(name, &st) != 0) {
-            // The file/dir must exist, or it would not have been added
-            // into ClassLoader::classpath_entry().
-            //
-            // If we can't access a jar file in the boot path, then we can't
-            // make assumptions about where classes get loaded from.
-            FileMapInfo::fail_stop("Unable to open jar file %s.", name);
-          }
+  size_t len = strlen(name) + 1;
+  _name = MetadataFactory::new_array<char>(ClassLoaderData::the_null_class_loader_data(), (int)len, THREAD);
+  strcpy(_name->data(), name);
+}
 
-          EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
-          SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
-        } else {
-          struct stat st;
-          if (os::stat(name, &st) == 0) {
-            if ((st.st_mode & S_IFMT) == S_IFDIR) {
-              if (!os::dir_is_empty(name)) {
-                ClassLoader::exit_with_path_failure(
-                  "Cannot have non-empty directory in archived classpaths", name);
-              }
-              ent->_filesize = -1;
-            }
-          }
-          if (ent->_filesize == 0) {
-            // unknown
-            ent->_filesize = -2;
-          }
-        }
-        ent->_name = strptr;
-        if (strptr + name_bytes <= strptr_max) {
-          strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
-          strptr += name_bytes;
-        } else {
-          assert(0, "miscalculated buffer size");
-        }
+bool SharedClassPathEntry::validate() {
+  struct stat st;
+  const char* name = this->name();
+  bool ok = true;
+  log_info(class, path)("checking shared classpath entry: %s", name);
+  if (os::stat(name, &st) != 0) {
+    FileMapInfo::fail_continue("Required classpath entry does not exist: %s", name);
+    ok = false;
+  } else if (is_dir()) {
+    if (!os::dir_is_empty(name)) {
+      FileMapInfo::fail_continue("directory is not empty: %s", name);
+      ok = false;
+    }
+  } else if (is_jar_or_bootimage()) {
+    if (_timestamp != st.st_mtime ||
+        _filesize != st.st_size) {
+      ok = false;
+      if (PrintSharedArchiveAndExit) {
+        FileMapInfo::fail_continue(_timestamp != st.st_mtime ?
+                                   "Timestamp mismatch" :
+                                   "File size mismatch");
+      } else {
+        FileMapInfo::fail_continue("A jar/jimage file is not the one used while building"
+                                   " the shared archive file: %s", name);
       }
     }
+  }
+  return ok;
+}
 
-    if (pass == 0) {
+void SharedClassPathEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_name);
+  it->push(&_manifest);
+}
+
+void FileMapInfo::allocate_classpath_entry_table() {
+  Thread* THREAD = Thread::current();
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  size_t entry_size = SharedClassUtil::shared_class_path_entry_size(); // assert ( should be 8 byte aligned??)
+  int num_entries = ClassLoader::number_of_classpath_entries();
+  size_t bytes = entry_size * num_entries;
+
+  _classpath_entry_table = MetadataFactory::new_array<u8>(loader_data, (int)((bytes + 7) / 8), THREAD);
+  _classpath_entry_table_size = num_entries;
+  _classpath_entry_size = entry_size;
+
+  assert(ClassLoader::get_jrt_entry() != NULL,
+         "No modular java runtime image present when allocating the CDS classpath entry table");
+
+  for (int i=0; i<num_entries; i++) {
+    ClassPathEntry *cpe = ClassLoader::classpath_entry(i);
+    const char* type = ((i == 0) ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
+
+    log_info(class, path)("add main shared path (%s) %s", type, cpe->name());
+    SharedClassPathEntry* ent = shared_classpath(i);
+    ent->init(cpe->name(), THREAD);
+
+    if (i > 0) { // No need to do jimage.
       EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
-      Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
-      strptr = (char*)(arr->data());
-      strptr_max = strptr + bytes;
-      SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
-      strptr += entry_size * count;
-
-      _classpath_entry_table_size = count;
-      _classpath_entry_table = table;
-      _classpath_entry_size = entry_size;
+      SharedClassUtil::update_shared_classpath(cpe, ent, THREAD);
     }
   }
 }
@@ -315,44 +300,19 @@
 
   _classpath_entry_table = _header->_classpath_entry_table;
   _classpath_entry_size = _header->_classpath_entry_size;
+  _classpath_entry_table_size = _header->_classpath_entry_table_size;
 
   for (int i=0; i<count; i++) {
-    SharedClassPathEntry* ent = shared_classpath(i);
-    struct stat st;
-    const char* name = ent->_name;
-    bool ok = true;
-    log_info(class, path)("checking shared classpath entry: %s", name);
-    if (os::stat(name, &st) != 0) {
-      fail_continue("Required classpath entry does not exist: %s", name);
-      ok = false;
-    } else if (ent->is_dir()) {
-      if (!os::dir_is_empty(name)) {
-        fail_continue("directory is not empty: %s", name);
-        ok = false;
-      }
-    } else if (ent->is_jar_or_bootimage()) {
-      if (ent->_timestamp != st.st_mtime ||
-          ent->_filesize != st.st_size) {
-        ok = false;
-        if (PrintSharedArchiveAndExit) {
-          fail_continue(ent->_timestamp != st.st_mtime ?
-                        "Timestamp mismatch" :
-                        "File size mismatch");
-        } else {
-          fail_continue("A jar/jimage file is not the one used while building"
-                        " the shared archive file: %s", name);
-        }
-      }
-    }
-    if (ok) {
+    if (shared_classpath(i)->validate()) {
       log_info(class, path)("ok");
     } else if (!PrintSharedArchiveAndExit) {
       _validating_classpath_entry_table = false;
+      _classpath_entry_table = NULL;
+      _classpath_entry_table_size = 0;
       return false;
     }
   }
 
-  _classpath_entry_table_size = _header->_classpath_entry_table_size;
   _validating_classpath_entry_table = false;
   return true;
 }
@@ -390,7 +350,7 @@
 
   size_t len = lseek(fd, 0, SEEK_END);
   struct FileMapInfo::FileMapHeader::space_info* si =
-    &_header->_space[MetaspaceShared::mc];
+    &_header->_space[MetaspaceShared::last_valid_region];
   if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
     fail_continue("The shared archive file has been truncated.");
     return false;
@@ -469,28 +429,16 @@
 }
 
 
-// Dump shared spaces to file.
-
-void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
-  align_file_position();
-  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
-  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
-  write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
-}
-
-
 // Dump region to file.
 
 void FileMapInfo::write_region(int region, char* base, size_t size,
-                               size_t capacity, bool read_only,
-                               bool allow_exec) {
+                               bool read_only, bool allow_exec) {
   struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
 
   if (_file_open) {
     guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
-    log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(6)
-                  " bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(6),
+    log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08)
+                  " bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08),
                   region, size, p2i(base), _file_offset);
   } else {
     si->_file_offset = _file_offset;
@@ -506,7 +454,6 @@
     si->_addr._base = base;
   }
   si->_used = size;
-  si->_capacity = capacity;
   si->_read_only = read_only;
   si->_allow_exec = allow_exec;
   si->_crc = ClassLoader::crc32(0, base, (jint)size);
@@ -523,25 +470,62 @@
 // The non-empty portion of the first region is written into the archive as one string
 // region. The rest are consecutive full GC regions if they exist, which can be written
 // out in one chunk as another string region.
-void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions) {
+//
+// Here's the mapping from (GrowableArray<MemRegion> *regions) -> (metaspace string regions).
+//   + We have 1 or more heap regions: r0, r1, r2 ..... rn
+//   + We have 2 metaspace string regions: s0 and s1
+//
+// If there's a single heap region (r0), then s0 == r0, and s1 is empty.
+// Otherwise:
+//
+// "X" represented space that's occupied by heap objects.
+// "_" represented unused spaced in the heap region.
+//
+//
+//    |r0        | r1  | r2 | ...... | rn |
+//    |XXXXXX|__ |XXXXX|XXXX|XXXXXXXX|XXXX|
+//    |<-s0->|   |<- s1 ----------------->|
+//            ^^^
+//             |
+//             +-- unmapped space
+void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions,
+                                       char** st0_start, char** st0_top, char** st0_end,
+                                       char** st1_start, char** st1_top, char** st1_end) {
+  *st0_start = *st0_top = *st0_end = NULL;
+  *st1_start = *st1_top = *st1_end = NULL;
+
+  assert(MetaspaceShared::max_strings == 2, "this loop doesn't work for any other value");
   for (int i = MetaspaceShared::first_string;
            i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
     char* start = NULL;
     size_t size = 0;
-    if (regions->is_nonempty()) {
+    int len = regions->length();
+    if (len > 0) {
       if (i == MetaspaceShared::first_string) {
         MemRegion first = regions->first();
         start = (char*)first.start();
         size = first.byte_size();
+        *st0_start = start;
+        *st0_top = start + size;
+        if (len > 1) {
+          *st0_end = (char*)regions->at(1).start();
+        } else {
+          *st0_end = start + size;
+        }
       } else {
-        int len = regions->length();
+        assert(i == MetaspaceShared::first_string + 1, "must be");
         if (len > 1) {
           start = (char*)regions->at(1).start();
           size = (char*)regions->at(len - 1).end() - start;
+          *st1_start = start;
+          *st1_top = start + size;
+          *st1_end = start + size;
         }
       }
     }
-    write_region(i, start, size, size, false, false);
+    log_info(cds)("String region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
+                  i, p2i(start), p2i(start + size), size);
+    write_region(i, start, size, false, false);
   }
 }
 
@@ -609,7 +593,7 @@
 // JVM/TI RedefineClasses() support:
 // Remap the shared readonly space to shared readwrite, private.
 bool FileMapInfo::remap_shared_readonly_as_readwrite() {
-  int idx = 0;
+  int idx = MetaspaceShared::ro;
   struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[idx];
   if (!si->_read_only) {
     // the space is already readwrite so we are done
@@ -639,10 +623,8 @@
 
 // Map the whole region at once, assumed to be allocated contiguously.
 ReservedSpace FileMapInfo::reserve_shared_memory() {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
   char* requested_addr = _header->region_addr(0);
-
-  size_t size = FileMapInfo::shared_spaces_size();
+  size_t size = FileMapInfo::core_spaces_size();
 
   // Reserve the space first, then map otherwise map will go right over some
   // other reserved memory (like the code cache).
@@ -862,9 +844,16 @@
   }
 }
 
+void FileMapInfo::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_classpath_entry_table);
+  for (int i=0; i<_classpath_entry_table_size; i++) {
+    shared_classpath(i)->metaspace_pointers_do(it);
+  }
+}
+
 
 FileMapInfo* FileMapInfo::_current_info = NULL;
-SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
+Array<u8>* FileMapInfo::_classpath_entry_table = NULL;
 int FileMapInfo::_classpath_entry_table_size = 0;
 size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
 bool FileMapInfo::_validating_classpath_entry_table = false;
@@ -890,11 +879,6 @@
   if (!validate_header()) {
     return false;
   }
-
-  SharedReadOnlySize =  _header->_space[0]._capacity;
-  SharedReadWriteSize = _header->_space[1]._capacity;
-  SharedMiscDataSize =  _header->_space[2]._capacity;
-  SharedMiscCodeSize =  _header->_space[3]._capacity;
   return true;
 }
 
@@ -1001,9 +985,12 @@
   return false;
 }
 
-// Check if a given address is within one of the shared regions (ro, rw, md, mc)
+// Check if a given address is within one of the shared regions (ro, rw, mc or md)
 bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
-  assert((idx >= MetaspaceShared::ro) && (idx <= MetaspaceShared::mc), "invalid region index");
+  assert(idx == MetaspaceShared::ro ||
+         idx == MetaspaceShared::rw ||
+         idx == MetaspaceShared::mc ||
+         idx == MetaspaceShared::md, "invalid region index");
   char* base = _header->region_addr(idx);
   if (p >= base && p < base + _header->_space[idx]._used) {
     return true;
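The diagram above for write_string_regions() maps n GC heap regions onto at most two archive string regions: s0 covers the partially-used first region, s1 the remaining consecutive full regions written as one chunk. A standalone model of that bounds computation follows; MemRegion is a simplified stand-in.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Simplified stand-in for HotSpot's MemRegion.
    struct MemRegion {
      char*  _start;
      size_t _size;
      char* start() const { return _start; }
      char* end()   const { return _start + _size; }
    };

    int main() {
      static char heap[4096];
      // r0 is only partially used; r1 and r2 are consecutive full regions.
      std::vector<MemRegion> regions = {
        { heap,        100  },
        { heap + 1024, 1024 },
        { heap + 2048, 1024 },
      };

      char* s0_start = regions[0].start();
      char* s0_top   = regions[0].end();
      char* s1_start = nullptr;
      char* s1_top   = nullptr;
      if (regions.size() > 1) {        // s1 spans r1 .. rn as one chunk
        s1_start = regions[1].start();
        s1_top   = regions.back().end();
      }
      std::printf("s0: %zu bytes, s1: %zu bytes\n",
                  (size_t)(s0_top - s0_start),
                  s1_start ? (size_t)(s1_top - s1_start) : (size_t)0);
    }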
--- a/hotspot/src/share/vm/memory/filemap.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -42,24 +42,33 @@
 
 static const int JVM_IDENT_MAX = 256;
 
-class Metaspace;
-
 class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
-public:
-  const char *_name;
+protected:
+  bool   _is_dir;
   time_t _timestamp;          // jar/jimage timestamp,  0 if is directory or other
   long   _filesize;           // jar/jimage file size, -1 if is directory, -2 if other
+  Array<char>* _name;
+  Array<u1>*   _manifest;
+
+public:
+  void init(const char* name, TRAPS);
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  bool validate();
 
   // The _timestamp only gets set for jar files and "modules" jimage.
   bool is_jar_or_bootimage() {
     return _timestamp != 0;
   }
-  bool is_dir() {
-    return _filesize == -1;
+  bool is_dir() { return _is_dir; }
+  bool is_jrt() { return ClassLoader::is_jrt(name()); }
+  time_t timestamp() const { return _timestamp; }
+  long   filesize()  const { return _filesize; }
+  const char* name() const { return _name->data(); }
+  const char* manifest() const {
+    return (_manifest == NULL) ? NULL : (const char*)_manifest->data();
   }
-
-  bool is_jrt() {
-    return ClassLoader::is_jrt(_name);
+  int manifest_size() const {
+    return (_manifest == NULL) ? 0 : _manifest->length();
   }
 };
 
@@ -68,7 +77,7 @@
   friend class ManifestStream;
   enum {
     _invalid_version = -1,
-    _current_version = 2
+    _current_version = 3
   };
 
   bool  _file_open;
@@ -76,7 +85,7 @@
   size_t  _file_offset;
 
 private:
-  static SharedClassPathEntry* _classpath_entry_table;
+  static Array<u8>*            _classpath_entry_table;
   static int                   _classpath_entry_table_size;
   static size_t                _classpath_entry_size;
   static bool                  _validating_classpath_entry_table;
@@ -110,8 +119,11 @@
     int     _narrow_klass_shift;      // save narrow klass base and shift
     address _narrow_klass_base;
     char*   _misc_data_patching_start;
+    char*   _read_only_tables_start;
     address _cds_i2i_entry_code_buffers;
     size_t  _cds_i2i_entry_code_buffers_size;
+    size_t  _core_spaces_size;        // number of bytes allocated by the core spaces
+                                      // (mc, md, ro, rw and od).
 
     struct space_info {
       int    _crc;           // crc checksum of the current space
@@ -121,7 +133,6 @@
         intx   _offset;      // offset from the compressed oop encoding base, only used
                              // by string space
       } _addr;
-      size_t _capacity;      // for validity checking
       size_t _used;          // for setting space top on read
       bool   _read_only;     // read only space?
       bool   _allow_exec;    // executable code in space?
@@ -158,7 +169,7 @@
     // loading failures during runtime.
     int _classpath_entry_table_size;
     size_t _classpath_entry_size;
-    SharedClassPathEntry* _classpath_entry_table;
+    Array<u8>* _classpath_entry_table;
 
     char* region_addr(int idx);
 
@@ -177,6 +188,7 @@
   bool  init_from_file(int fd);
   void  align_file_position();
   bool  validate_header_impl();
+  static void metaspace_pointers_do(MetaspaceClosure* it);
 
 public:
   FileMapInfo();
@@ -195,10 +207,11 @@
   uintx  max_heap_size()              { return _header->_max_heap_size; }
   address narrow_klass_base() const   { return _header->_narrow_klass_base; }
   int     narrow_klass_shift() const  { return _header->_narrow_klass_shift; }
-  size_t space_capacity(int i)        { return _header->_space[i]._capacity; }
   struct FileMapHeader* header()      { return _header; }
   char* misc_data_patching_start()            { return _header->_misc_data_patching_start; }
   void set_misc_data_patching_start(char* p)  { _header->_misc_data_patching_start = p; }
+  char* read_only_tables_start()              { return _header->_read_only_tables_start; }
+  void set_read_only_tables_start(char* p)    { _header->_read_only_tables_start = p; }
 
   address cds_i2i_entry_code_buffers() {
     return _header->_cds_i2i_entry_code_buffers;
@@ -212,6 +225,8 @@
   void set_cds_i2i_entry_code_buffers_size(size_t s) {
     _header->_cds_i2i_entry_code_buffers_size = s;
   }
+  void set_core_spaces_size(size_t s)    {  _header->_core_spaces_size = s; }
+  size_t core_spaces_size()              { return _header->_core_spaces_size; }
 
   static FileMapInfo* current_info() {
     CDS_ONLY(return _current_info;)
@@ -225,10 +240,11 @@
   bool  open_for_read();
   void  open_for_write();
   void  write_header();
-  void  write_space(int i, Metaspace* space, bool read_only);
   void  write_region(int region, char* base, size_t size,
-                     size_t capacity, bool read_only, bool allow_exec);
-  void  write_string_regions(GrowableArray<MemRegion> *regions);
+                     bool read_only, bool allow_exec);
+  void  write_string_regions(GrowableArray<MemRegion> *regions,
+                             char** s0_start, char** s0_top, char** s0_end,
+                             char** s1_start, char** s1_top, char** s1_end);
   void  write_bytes(const void* buffer, int count);
   void  write_bytes_aligned(const void* buffer, int count);
   char* map_region(int i);
@@ -255,29 +271,6 @@
   bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
 
-  // The ro+rw+md+mc spaces size
-  static size_t core_spaces_size() {
-    return align_up((SharedReadOnlySize + SharedReadWriteSize +
-                     SharedMiscDataSize + SharedMiscCodeSize),
-                     os::vm_allocation_granularity());
-  }
-
-  // The estimated optional space size.
-  //
-  // Currently the optional space only has archived class bytes.
-  // The core_spaces_size is the size of all class metadata, which is a good
-  // estimate of the total class bytes to be archived. Only the portion
-  // containing data is written out to the archive and mapped at runtime.
-  // There is no memory waste due to unused portion in optional space.
-  static size_t optional_space_size() {
-    return core_spaces_size();
-  }
-
-  // Total shared_spaces size includes the ro, rw, md, mc and od spaces
-  static size_t shared_spaces_size() {
-    return core_spaces_size() + optional_space_size();
-  }
-
   // Stop CDS sharing and unmap CDS regions.
   static void stop_sharing_and_unmap(const char* msg);
 
@@ -288,13 +281,14 @@
     if (index < 0) {
       return NULL;
     }
-    char* p = (char*)_classpath_entry_table;
+    assert(index < _classpath_entry_table_size, "sanity");
+    char* p = (char*)_classpath_entry_table->data();
     p += _classpath_entry_size * index;
     return (SharedClassPathEntry*)p;
   }
   static const char* shared_classpath_name(int index) {
     assert(index >= 0, "Sanity");
-    return shared_classpath(index)->_name;
+    return shared_classpath(index)->name();
   }
 
   static int get_number_of_share_classpaths() {
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -36,7 +36,7 @@
   static Array<T>* new_array(ClassLoaderData* loader_data, int length, TRAPS) {
     // All metadata arrays are read-only when dumped to the shared archive;
     // their read-only status is now determined during dumping (see
     // MetaspaceClosure), so no explicit read_only argument is needed here.
-    return new (loader_data, length, /*read_only*/true, THREAD) Array<T>(length);
+    return new (loader_data, length, THREAD) Array<T>(length);
   }
 
   template <typename T>
@@ -49,47 +49,22 @@
   }
 
   template <typename T>
-  static Array<T>* new_writeable_array(ClassLoaderData* loader_data, int length, TRAPS) {
-    return new (loader_data, length, /*read_only*/false, THREAD) Array<T>(length);
-  }
-
-  template <typename T>
-  static Array<T>* new_writeable_array(ClassLoaderData* loader_data, int length, T value, TRAPS) {
-    Array<T>* array = new_writeable_array<T>(loader_data, length, CHECK_NULL);
-    for (int i = 0; i < length; i++) {
-      array->at_put(i, value);
-    }
-    return array;
-  }
-
-  template <typename T>
   static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
     if (data != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       assert(!data->is_shared(), "cannot deallocate array in shared spaces");
       int size = data->size();
-      if (DumpSharedSpaces) {
-        loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
-      } else {
-        loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
-      }
+      loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
     }
   }
 
   // Deallocation method for metadata
   template <class T>
   static void free_metadata(ClassLoaderData* loader_data, T md) {
-    if (DumpSharedSpaces) {
-      // FIXME: the freeing code is buggy, especially when -Xlog:cds is enabled.
-      // Disable for now -- this means if you specify bad classes in your classlist you
-      // may have wasted space inside the archive.
-      return;
-    }
     if (md != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       int size = md->size();
       // Call metadata's deallocate function which will call deallocate fields
-      assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
       assert(!md->on_stack(), "can't deallocate things on stack");
       assert(!md->is_shared(), "cannot deallocate if in shared spaces");
       md->deallocate_contents(loader_data);
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -65,6 +65,8 @@
 size_t Metaspace::_compressed_class_space_size;
 const MetaspaceTracer* Metaspace::_tracer = NULL;
 
+DEBUG_ONLY(bool Metaspace::_frozen = false;)
+
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
   ZeroIndex = 0,
@@ -502,34 +504,8 @@
   // byte_size is the size of the associated virtualspace.
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   assert_is_aligned(bytes, Metaspace::reserve_alignment());
-
-#if INCLUDE_CDS
-  // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
-  // configurable address, generally at the top of the Java heap so other
-  // memory addresses don't conflict.
-  if (DumpSharedSpaces) {
-    bool large_pages = false; // No large pages when dumping the CDS archive.
-    char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
-
-    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
-    if (_rs.is_reserved()) {
-      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
-    } else {
-      // Get a mmap region anywhere if the SharedBaseAddress fails.
-      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
-    }
-    if (!_rs.is_reserved()) {
-      vm_exit_during_initialization("Unable to allocate memory for shared space",
-        err_msg(SIZE_FORMAT " bytes.", bytes));
-    }
-    MetaspaceShared::initialize_shared_rs(&_rs);
-  } else
-#endif
-  {
-    bool large_pages = should_commit_large_pages_when_reserving(bytes);
-
-    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
-  }
+  bool large_pages = should_commit_large_pages_when_reserving(bytes);
+  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 
   if (_rs.is_reserved()) {
     assert(_rs.base() != NULL, "Catch if we get a NULL address");
@@ -2148,8 +2124,6 @@
   if (is_class()) {
     switch (type) {
     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
-    case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
-    case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
     default:                                 requested = ClassSmallChunk; break;
@@ -2157,8 +2131,6 @@
   } else {
     switch (type) {
     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
-    case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
-    case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
     default:                                 requested = SmallChunk; break;
@@ -2651,15 +2623,6 @@
   // Is there space in the current chunk?
   MetaWord* result = NULL;
 
-  // For DumpSharedSpaces, only allocate out of the current chunk which is
-  // never null because we gave it the size we wanted.   Caller reports out
-  // of memory if this returns null.
-  if (DumpSharedSpaces) {
-    assert(current_chunk() != NULL, "should never happen");
-    inc_used_metrics(word_size);
-    return current_chunk()->allocate(word_size); // caller handles null result
-  }
-
   if (current_chunk() != NULL) {
     result = current_chunk()->allocate(word_size);
   }
@@ -3113,6 +3076,7 @@
 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 
 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
   // narrow_klass_base is the lower of the metaspace base and the cds base
   // (if cds is enabled).  The narrow_klass_shift depends on the distance
@@ -3121,7 +3085,7 @@
   address higher_address;
 #if INCLUDE_CDS
   if (UseSharedSpaces) {
-    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                           (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
   } else
@@ -3155,7 +3119,7 @@
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
-  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                                 (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
@@ -3163,6 +3127,7 @@
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+  assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
@@ -3358,116 +3323,73 @@
 void Metaspace::global_initialize() {
   MetaspaceGC::initialize();
 
-  // Initialize the alignment for shared spaces.
-  int max_alignment = os::vm_allocation_granularity();
-  size_t cds_total = 0;
-
-  MetaspaceShared::set_max_alignment(max_alignment);
-
+#if INCLUDE_CDS
   if (DumpSharedSpaces) {
-#if INCLUDE_CDS
-    MetaspaceShared::estimate_regions_size();
-
-    SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
-    SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
-    SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
-    SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
-
-    // Initialize with the sum of the shared space sizes.  The read-only
-    // and read write metaspace chunks will be allocated out of this and the
-    // remainder is the misc code and data chunks.
-    cds_total = FileMapInfo::shared_spaces_size();
-    cds_total = align_up(cds_total, _reserve_alignment);
-    _space_list = new VirtualSpaceList(cds_total/wordSize);
-    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
-
-    if (!_space_list->initialization_succeeded()) {
-      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
-    }
-
+    MetaspaceShared::initialize_shared_rs();
+  } else if (UseSharedSpaces) {
+    // If using shared space, open the file that contains the shared space
+    // and map in the memory before initializing the rest of metaspace (so
+    // the addresses don't conflict)
+    address cds_address = NULL;
+    FileMapInfo* mapinfo = new FileMapInfo();
+
+    // Open the shared archive file, read and validate the header. If
+    // initialization fails, shared spaces [UseSharedSpaces] are
+    // disabled and the file is closed.
+    // The spaces themselves are also mapped in at this point.
+    if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
+      size_t cds_total = MetaspaceShared::core_spaces_size();
+      cds_address = (address)mapinfo->header()->region_addr(0);
 #ifdef _LP64
-    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
-      vm_exit_during_initialization("Unable to dump shared archive.",
-          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
-                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
-                  "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
-                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
+      if (using_class_space()) {
+        char* cds_end = (char*)(cds_address + cds_total);
+        cds_end = (char *)align_up(cds_end, _reserve_alignment);
+        // If UseCompressedClassPointers is set then allocate the metaspace area
+        // above the heap and above the CDS area (if it exists).
+        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+        // Map the shared string space after compressed pointers
+        // because it relies on compressed class pointers setting to work
+        mapinfo->map_string_regions();
+      }
+#endif // _LP64
+    } else {
+      assert(!mapinfo->is_open() && !UseSharedSpaces,
+             "archive file not closed or shared spaces not disabled.");
     }
-
-    // Set the compressed klass pointer base so that decoding of these pointers works
-    // properly when creating the shared archive.
-    assert(UseCompressedOops && UseCompressedClassPointers,
-      "UseCompressedOops and UseCompressedClassPointers must be set");
-    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
-    log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,
-                                     p2i(_space_list->current_virtual_space()->bottom()));
-
-    Universe::set_narrow_klass_shift(0);
-#endif // _LP64
-#endif // INCLUDE_CDS
-  } else {
-#if INCLUDE_CDS
-    if (UseSharedSpaces) {
-      // If using shared space, open the file that contains the shared space
-      // and map in the memory before initializing the rest of metaspace (so
-      // the addresses don't conflict)
-      address cds_address = NULL;
-      FileMapInfo* mapinfo = new FileMapInfo();
-
-      // Open the shared archive file, read and validate the header. If
-      // initialization fails, shared spaces [UseSharedSpaces] are
-      // disabled and the file is closed.
-      // Map in spaces now also
-      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-        cds_total = FileMapInfo::shared_spaces_size();
-        cds_address = (address)mapinfo->header()->region_addr(0);
-#ifdef _LP64
-        if (using_class_space()) {
-          char* cds_end = (char*)(cds_address + cds_total);
-          cds_end = align_up(cds_end, _reserve_alignment);
-          // If UseCompressedClassPointers is set then allocate the metaspace area
-          // above the heap and above the CDS area (if it exists).
-          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-          // Map the shared string space after compressed pointers
-          // because it relies on compressed class pointers setting to work
-          mapinfo->map_string_regions();
-        }
-#endif // _LP64
-      } else {
-        assert(!mapinfo->is_open() && !UseSharedSpaces,
-               "archive file not closed or shared spaces not disabled.");
-      }
-    }
+  }
 #endif // INCLUDE_CDS
 
 #ifdef _LP64
-    if (!UseSharedSpaces && using_class_space()) {
+  if (!UseSharedSpaces && using_class_space()) {
+    if (DumpSharedSpaces) {
+      // Already initialized inside MetaspaceShared::initialize_shared_rs()
+    } else {
       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
       allocate_metaspace_compressed_klass_ptrs(base, 0);
     }
+  }
 #endif // _LP64
 
-    // Initialize these before initializing the VirtualSpaceList
-    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
-    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
-    // Make the first class chunk bigger than a medium chunk so it's not put
-    // on the medium chunk list.   The next chunk will be small and progress
-    // from there.  This size calculated by -version.
-    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                       (CompressedClassSpaceSize/BytesPerWord)*2);
-    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
-    // Arbitrarily set the initial virtual space to a multiple
-    // of the boot class loader size.
-    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
-    word_size = align_up(word_size, Metaspace::reserve_alignment_words());
-
-    // Initialize the list of virtual spaces.
-    _space_list = new VirtualSpaceList(word_size);
-    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
-
-    if (!_space_list->initialization_succeeded()) {
-      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
-    }
+  // Initialize these before initializing the VirtualSpaceList
+  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
+  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
+  // Make the first class chunk bigger than a medium chunk so it's not put
+  // on the medium chunk list.   The next chunk will be small and progress
+  // from there.  This size was calculated by running -version.
+  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
+                                     (CompressedClassSpaceSize/BytesPerWord)*2);
+  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
+  // Arbitrarily set the initial virtual space to a multiple
+  // of the boot class loader size.
+  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+  word_size = align_up(word_size, Metaspace::reserve_alignment_words());
+
+  // Initialize the list of virtual spaces.
+  _space_list = new VirtualSpaceList(word_size);
+  _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+  if (!_space_list->initialization_succeeded()) {
+    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
   }
 
   _tracer = new MetaspaceTracer();
@@ -3496,11 +3418,6 @@
                                                   get_space_manager(mdtype)->medium_chunk_bunch());
   }
 
-  // For dumping shared archive, report error if allocation has failed.
-  if (DumpSharedSpaces && chunk == NULL) {
-    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
-  }
-
   return chunk;
 }
 
@@ -3534,9 +3451,6 @@
   if (using_class_space()) {
     initialize_first_chunk(type, ClassType);
   }
-
-  _alloc_record_head = NULL;
-  _alloc_record_tail = NULL;
 }
 
 size_t Metaspace::align_word_size_up(size_t word_size) {
@@ -3545,8 +3459,8 @@
 }
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
-  // DumpSharedSpaces doesn't use class metadata area (yet)
-  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
+  assert(!_frozen, "sanity");
+  // Don't use class_vsm() unless UseCompressedClassPointers is true.
   if (is_class_space_allocation(mdtype)) {
     return  class_vsm()->allocate(word_size);
   } else {
@@ -3555,6 +3469,7 @@
 }
 
 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
+  assert(!_frozen, "sanity");
   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
   assert(delta_bytes > 0, "Must be");
 
@@ -3580,13 +3495,6 @@
   return res;
 }
 
-// Space allocated in the Metaspace.  This may
-// be across several metadata virtual spaces.
-char* Metaspace::bottom() const {
-  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
-  return (char*)vsm()->current_chunk()->bottom();
-}
-
 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
   if (mdtype == ClassType) {
     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
@@ -3596,6 +3504,7 @@
 }
 
 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
+  assert(!_frozen, "sanity");
   if (mdtype == ClassType) {
     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
   } else {
@@ -3635,13 +3544,10 @@
 }
 
 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
+  assert(!_frozen, "sanity");
   assert(!SafepointSynchronize::is_at_safepoint()
          || Thread::current()->is_VM_thread(), "should be the VM thread");
 
-  if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
-    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
-  }
-
   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
   if (is_class && using_class_space()) {
@@ -3651,9 +3557,9 @@
   }
 }
 
-
 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
-                              bool read_only, MetaspaceObj::Type type, TRAPS) {
+                              MetaspaceObj::Type type, TRAPS) {
+  assert(!_frozen, "sanity");
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
     return NULL;  // caller does a CHECK_NULL too
@@ -3662,26 +3568,6 @@
   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");
 
-  // Allocate in metaspaces without taking out a lock, because it deadlocks
-  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
-  // to revisit this for application class data sharing.
-  if (DumpSharedSpaces) {
-    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
-    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
-    MetaWord* result = space->allocate(word_size, NonClassType);
-    if (result == NULL) {
-      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
-    }
-    if (log_is_enabled(Info, cds)) {
-      space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
-    }
-
-    // Zero initialize.
-    Copy::fill_to_words((HeapWord*)result, word_size, 0);
-
-    return result;
-  }
-
   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
 
   // Try to allocate metadata.
@@ -3788,78 +3674,6 @@
   }
 }
 
-void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
-  assert(DumpSharedSpaces, "sanity");
-
-  int byte_size = (int)word_size * wordSize;
-  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
-
-  if (_alloc_record_head == NULL) {
-    _alloc_record_head = _alloc_record_tail = rec;
-  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
-    _alloc_record_tail->_next = rec;
-    _alloc_record_tail = rec;
-  } else {
-    // slow linear search, but this doesn't happen that often, and only when dumping
-    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
-      if (old->_ptr == ptr) {
-        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
-        int remain_bytes = old->_byte_size - byte_size;
-        assert(remain_bytes >= 0, "sanity");
-        old->_type = type;
-
-        if (remain_bytes == 0) {
-          delete(rec);
-        } else {
-          address remain_ptr = address(ptr) + byte_size;
-          rec->_ptr = remain_ptr;
-          rec->_byte_size = remain_bytes;
-          rec->_type = MetaspaceObj::DeallocatedType;
-          rec->_next = old->_next;
-          old->_byte_size = byte_size;
-          old->_next = rec;
-        }
-        return;
-      }
-    }
-    assert(0, "reallocating a freed pointer that was not recorded");
-  }
-}
-
-void Metaspace::record_deallocation(void* ptr, size_t word_size) {
-  assert(DumpSharedSpaces, "sanity");
-
-  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
-    if (rec->_ptr == ptr) {
-      assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
-      rec->_type = MetaspaceObj::DeallocatedType;
-      return;
-    }
-  }
-
-  assert(0, "deallocating a pointer that was not recorded");
-}
-
-void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
-  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
-
-  address last_addr = (address)bottom();
-
-  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
-    address ptr = rec->_ptr;
-    if (last_addr < ptr) {
-      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
-    }
-    closure->doit(ptr, rec->_type, rec->_byte_size);
-    last_addr = ptr + rec->_byte_size;
-  }
-
-  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
-  if (last_addr < top) {
-    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
-  }
-}
-
 void Metaspace::purge(MetadataType mdtype) {
   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
 }
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -35,8 +35,6 @@
 // Metaspaces are Arenas for the VM's metadata.
 // They are allocated one per class loader object, and one for the null
 // bootstrap class loader
-// Eventually for bootstrap loader we'll have a read-only section and read-write
-// to write for DumpSharedSpaces and read for UseSharedSpaces
 //
 //    block X ---+       +-------------------+
 //               |       |  Virtualspace     |
@@ -87,6 +85,7 @@
   friend class VM_CollectForMetadataAllocation;
   friend class MetaspaceGC;
   friend class MetaspaceAux;
+  friend class MetaspaceShared;
   friend class CollectorPolicy;
 
  public:
@@ -98,8 +97,6 @@
   enum MetaspaceType {
     StandardMetaspaceType,
     BootMetaspaceType,
-    ROMetaspaceType,
-    ReadWriteMetaspaceType,
     AnonymousMetaspaceType,
     ReflectionMetaspaceType
   };
@@ -134,6 +131,7 @@
 
   static size_t _commit_alignment;
   static size_t _reserve_alignment;
+  DEBUG_ONLY(static bool   _frozen;)
 
   SpaceManager* _vsm;
   SpaceManager* vsm() const { return _vsm; }
@@ -177,12 +175,11 @@
   }
 
   static const MetaspaceTracer* tracer() { return _tracer; }
-
+  static void freeze() {
+    assert(DumpSharedSpaces, "sanity");
+    DEBUG_ONLY(_frozen = true;)
+  }
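+  // Once frozen, the assert(!_frozen) checks in allocate(), deallocate() and
+  // friends catch (in debug builds) any metaspace mutation attempted while
+  // the CDS archive is being dumped.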
  private:
-  // These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
-  // maintain a single list for now.
-  void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
-  void record_deallocation(void* ptr, size_t word_size);
 
 #ifdef _LP64
   static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
@@ -194,20 +191,6 @@
 
   static void initialize_class_space(ReservedSpace rs);
 #endif
-
-  class AllocRecord : public CHeapObj<mtClass> {
-  public:
-    AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
-      : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
-    AllocRecord *_next;
-    address _ptr;
-    MetaspaceObj::Type _type;
-    int _byte_size;
-  };
-
-  AllocRecord * _alloc_record_head;
-  AllocRecord * _alloc_record_tail;
-
   size_t class_chunk_size(size_t word_size);
 
  public:
@@ -227,7 +210,6 @@
   static size_t commit_alignment()        { return _commit_alignment; }
   static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
 
-  char*  bottom() const;
   size_t used_words_slow(MetadataType mdtype) const;
   size_t free_words_slow(MetadataType mdtype) const;
   size_t capacity_words_slow(MetadataType mdtype) const;
@@ -239,7 +221,7 @@
   size_t allocated_chunks_bytes() const;
 
   static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
-                            bool read_only, MetaspaceObj::Type type, TRAPS);
+                            MetaspaceObj::Type type, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   static bool contains(const void* ptr);
@@ -262,16 +244,9 @@
 
   static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0) NOT_LP64({});
 
-  class AllocRecordClosure :  public StackObj {
-  public:
-    virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
-  };
-
-  void iterate(AllocRecordClosure *closure);
-
-  // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
+  // Return TRUE only if UseCompressedClassPointers is True.
   static bool using_class_space() {
-    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
+    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers);
   }
 
   static bool is_class_space_allocation(MetadataType mdType) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/metaspaceClosure.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/metaspaceClosure.hpp"
+
+// Update the reference to point to new_loc.
+void MetaspaceClosure::Ref::update(address new_loc) const {
+  log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
+                 p2i(mpp()), p2i(obj()), p2i(new_loc));
+  uintx p = (uintx)new_loc;
+  p |= flag_bits(); // Make sure the flag bits are copied to the new pointer.
+  *(address*)mpp() = (address)p;
+}
+
+void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref, Writability w) {
+  if (ref->not_null()) {
+    bool read_only;
+    switch (w) {
+    case _writable:
+      read_only = false;
+      break;
+    case _not_writable:
+      read_only = true;
+      break;
+    default:
+      assert(w == _default, "must be");
+      read_only = ref->is_read_only_by_default();
+    }
+    if (do_ref(ref, read_only)) { // true means we want to iterate the embedded pointers in <ref>
+      ref->metaspace_pointers_do(this);
+    }
+  }
+}
+
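+// Callers of MetaspaceClosure::push() may override the default writability of
+// the pushed object, e.g. (hypothetical field name):
+//
+//   it->push(&_runtime_mutable_data, MetaspaceClosure::_writable);
+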
+bool UniqueMetaspaceClosure::do_ref(MetaspaceClosure::Ref* ref, bool read_only) {
+  bool* found = _has_been_visited.get(ref->obj());
+  if (found != NULL) {
+    assert(*found == read_only, "must be");
+    return false; // Already visited: no need to iterate embedded pointers.
+  } else {
+    bool isnew = _has_been_visited.put(ref->obj(), read_only);
+    assert(isnew, "sanity");
+    do_unique_ref(ref, read_only);
+    return true;  // Saw this for the first time: iterate the embedded pointers.
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/metaspaceClosure.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_METASPACECLOSURE_HPP
+#define SHARE_VM_MEMORY_METASPACECLOSURE_HPP
+
+#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "oops/array.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+// The metadata hierarchy is separate from the oop hierarchy
+  class MetaspaceObj;        // no C++ vtable
+//class   Array;             // no C++ vtable
+  class   Annotations;       // no C++ vtable
+  class   ConstantPoolCache; // no C++ vtable
+  class   ConstMethod;       // no C++ vtable
+  class   MethodCounters;    // no C++ vtable
+  class   Symbol;            // no C++ vtable
+  class   Metadata;          // has C++ vtable (so do all subclasses)
+  class     ConstantPool;
+  class     MethodData;
+  class     Method;
+  class     Klass;
+  class       InstanceKlass;
+  class         InstanceMirrorKlass;
+  class         InstanceClassLoaderKlass;
+  class         InstanceRefKlass;
+  class       ArrayKlass;
+  class         ObjArrayKlass;
+  class         TypeArrayKlass;
+
+// class MetaspaceClosure --
+//
+// This class is used for iterating the objects in the HotSpot Metaspaces. It
+// provides an API to walk all the reachable objects starting from a set of
+// root references (such as all Klass'es in the SystemDictionary).
+//
+// Currently it is used for compacting the CDS archive by eliminating temporary
+// objects allocated during archive creation time. See ArchiveCompactor in
+// metaspaceShared.cpp for an example.
+//
+// To support MetaspaceClosure, each subclass of MetaspaceObj must provide
+// a method of the type void metaspace_pointers_do(MetaspaceClosure*). This method
+// should call MetaspaceClosure::push() on every pointer field of this
+// class that points to a MetaspaceObj. See Annotations::metaspace_pointers_do()
+// for an example.
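+//
+// A minimal sketch of such a method (the class and field names below are
+// illustrative only, not taken from any particular HotSpot class):
+//
+//   void MyMetadata::metaspace_pointers_do(MetaspaceClosure* it) {
+//     it->push(&_annotations);  // Annotations*     -> ObjectRef
+//     it->push(&_methods);      // Array<Method*>*  -> PointerArrayRef
+//     it->push(&_byte_data);    // Array<u1>*       -> PrimitiveArrayRef
+//   }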
+class MetaspaceClosure {
+public:
+  enum Writability {
+    _writable,
+    _not_writable,
+    _default
+  };
+
+  // class MetaspaceClosure::Ref --
+  //
+  // MetaspaceClosure can be viewed as a very simple type of copying garbage
+  // collector. For it to function properly, it requires each subclass of
+  // MetaspaceObj to provide two methods:
+  //
+  //  size_t size();                                 -- to determine how much data to copy
+  //  void metaspace_pointers_do(MetaspaceClosure*); -- to locate all the embedded pointers
+  //
+  // Calling these methods would be trivial if these two were virtual methods.
+  // However, to save space, MetaspaceObj has NO vtable. The vtable is introduced
+  // only in the Metadata class.
+  //
+// To work around the lack of a vtable, we use the Ref class with templates
+  // (see ObjectRef, PrimitiveArrayRef and PointerArrayRef)
+// so that we can statically discover the type of an object. The use of Ref
+  // depends on the fact that:
+  //
+  // [1] We don't use polymorphic pointers for MetaspaceObj's that are not subclasses
+  //     of Metadata. I.e., we don't do this:
+  //     class Klass {
+  //         MetaspaceObj *_obj;
+  //         Array<int>* foo() { return (Array<int>*)_obj; }
+//         Symbol*     bar() { return (Symbol*)    _obj; }
+//     };
+  //
+  // [2] All Array<T> dimensions are statically declared.
+  class Ref {
+  protected:
+    virtual void** mpp() const = 0;
+  public:
+    virtual bool not_null() const = 0;
+    virtual int size() const = 0;
+    virtual void metaspace_pointers_do(MetaspaceClosure *it) const = 0;
+    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const = 0;
+    virtual MetaspaceObj::Type msotype() const = 0;
+    virtual bool is_read_only_by_default() const = 0;
+
+    address obj() const {
+      // In some rare cases (see CPSlot in constantPool.hpp) we store some flags in the lowest
+      // 2 bits of a MetaspaceObj pointer. Unmask these when manipulating the pointer.
+      uintx p = (uintx)*mpp();
+      return (address)(p & (~FLAG_MASK));
+    }
+
+    void update(address new_loc) const;
+
+  private:
+    static const uintx FLAG_MASK = 0x03;
+
+    int flag_bits() const {
+      uintx p = (uintx)*mpp();
+      return (int)(p & FLAG_MASK);
+    }
+  };
+
+private:
+  // -------------------------------------------------- ObjectRef
+  template <class T> class ObjectRef : public Ref {
+    T** _mpp;
+    T* dereference() const {
+      return *_mpp;
+    }
+  protected:
+    virtual void** mpp() const {
+      return (void**)_mpp;
+    }
+
+  public:
+    ObjectRef(T** mpp) : _mpp(mpp) {}
+
+    virtual bool is_read_only_by_default() const { return T::is_read_only_by_default(); }
+    virtual bool not_null()                const { return dereference() != NULL; }
+    virtual int size()                     const { return dereference()->size(); }
+    virtual MetaspaceObj::Type msotype()   const { return dereference()->type(); }
+
+    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+      dereference()->metaspace_pointers_do(it);
+    }
+    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+      ((T*)new_loc)->metaspace_pointers_do(it);
+    }
+  };
+
+  // -------------------------------------------------- PrimitiveArrayRef
+  template <class T> class PrimitiveArrayRef : public Ref {
+    Array<T>** _mpp;
+    Array<T>* dereference() const {
+      return *_mpp;
+    }
+  protected:
+    virtual void** mpp() const {
+      return (void**)_mpp;
+    }
+
+  public:
+    PrimitiveArrayRef(Array<T>** mpp) : _mpp(mpp) {}
+
+    // all Arrays are read-only by default
+    virtual bool is_read_only_by_default() const { return true; }
+    virtual bool not_null()                const { return dereference() != NULL;  }
+    virtual int size()                     const { return dereference()->size(); }
+    virtual MetaspaceObj::Type msotype()   const { return MetaspaceObj::array_type(sizeof(T)); }
+
+    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+      Array<T>* array = dereference();
+      log_trace(cds)("Iter(PrimitiveArray): %p [%d]", array, array->length());
+    }
+    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+      Array<T>* array = (Array<T>*)new_loc;
+      log_trace(cds)("Iter(PrimitiveArray): %p [%d]", array, array->length());
+    }
+  };
+
+  // -------------------------------------------------- PointerArrayRef
+  template <class T> class PointerArrayRef : public Ref {
+    Array<T*>** _mpp;
+    Array<T*>* dereference() const {
+      return *_mpp;
+    }
+  protected:
+    virtual void** mpp() const {
+      return (void**)_mpp;
+    }
+
+  public:
+    PointerArrayRef(Array<T*>** mpp) : _mpp(mpp) {}
+
+    // all Arrays are read-only by default
+    virtual bool is_read_only_by_default() const { return true; }
+    virtual bool not_null()                const { return dereference() != NULL; }
+    virtual int size()                     const { return dereference()->size(); }
+    virtual MetaspaceObj::Type msotype()   const { return MetaspaceObj::array_type(sizeof(T*)); }
+
+    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+      metaspace_pointers_do_at_impl(it, dereference());
+    }
+    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+      metaspace_pointers_do_at_impl(it, (Array<T*>*)new_loc);
+    }
+  private:
+    void metaspace_pointers_do_at_impl(MetaspaceClosure *it, Array<T*>* array) const {
+      log_trace(cds)("Iter(ObjectArray): %p [%d]", array, array->length());
+      for (int i = 0; i < array->length(); i++) {
+        T** mpp = array->adr_at(i);
+        it->push(mpp);
+      }
+    }
+  };
+
+  void push_impl(Ref* ref, Writability w);
+
+public:
+  // returns true if we want to keep iterating the pointers embedded inside <ref>
+  virtual bool do_ref(Ref* ref, bool read_only) = 0;
+
+  // When you do:
+  //     void MyType::metaspace_pointers_do(MetaspaceClosure* it) {
+  //       it->push(_my_field)
+  //
+  // C++ will try to match the "most specific" template function. This one
+  // will be matched if possible (if mpp is an Array<> of any pointer type).
+  template <typename T> void push(Array<T*>** mpp, Writability w = _default) {
+    PointerArrayRef<T> ref(mpp);
+    push_impl(&ref, w);
+  }
+
+  // If the above function doesn't match (mpp is an Array<>, but T is not a pointer type), then
+  // this is the second choice.
+  template <typename T> void push(Array<T>** mpp, Writability w = _default) {
+    PrimitiveArrayRef<T> ref(mpp);
+    push_impl(&ref, w);
+  }
+
+  // If the above function doesn't match (mpp is not an Array<> type), then
+  // this will be matched by default.
+  template <class T> void push(T** mpp, Writability w = _default) {
+    ObjectRef<T> ref(mpp);
+    push_impl(&ref, w);
+  }
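+
+  // For instance, given these (hypothetical) fields, overload resolution
+  // selects the following Ref types:
+  //   Array<Klass*>* _ks;   it->push(&_ks);   // -> PointerArrayRef<Klass>
+  //   Array<u2>*     _u2s;  it->push(&_u2s);  // -> PrimitiveArrayRef<u2>
+  //   Method*        _m;    it->push(&_m);    // -> ObjectRef<Method>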
+};
+
+// This is a special MetaspaceClosure that visits each unique MetaspaceObj once.
+class UniqueMetaspaceClosure : public MetaspaceClosure {
+  // Do not override. Returns true if we are discovering ref->obj() for the first time.
+  virtual bool do_ref(Ref* ref, bool read_only);
+
+public:
+  // Gets called the first time we discover an object.
+  virtual void do_unique_ref(Ref* ref, bool read_only) = 0;
+private:
+  static unsigned my_hash(const address& a) {
+    return primitive_hash<address>(a);
+  }
+  static bool my_equals(const address& a0, const address& a1) {
+    return primitive_equals<address>(a0, a1);
+  }
+  ResourceHashtable<
+      address, bool,
+      UniqueMetaspaceClosure::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
+      UniqueMetaspaceClosure::my_equals, // solaris compiler doesn't like: primitive_equals<address>
+    16384> _has_been_visited;
+};
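+
+// A minimal sketch of a subclass (hypothetical; see ArchiveCompactor in
+// metaspaceShared.cpp for real uses of MetaspaceClosure):
+//
+//   class CountingClosure : public UniqueMetaspaceClosure {
+//     int _count;
+//   public:
+//     CountingClosure() : _count(0) {}
+//     virtual void do_unique_ref(Ref* ref, bool read_only) { _count++; }
+//     int count() const { return _count; }
+//   };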
+
+#endif // SHARE_VM_MEMORY_METASPACECLOSURE_HPP
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -51,6 +51,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvm.h"
+#include "prims/jvmtiRedefineClasses.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/os.hpp"
 #include "runtime/signature.hpp"
@@ -59,74 +60,261 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
-
-int MetaspaceShared::_max_alignment = 0;
+#include "memory/metaspaceClosure.hpp"
 
-ReservedSpace* MetaspaceShared::_shared_rs = NULL;
-
+ReservedSpace MetaspaceShared::_shared_rs;
+VirtualSpace MetaspaceShared::_shared_vs;
 MetaspaceSharedStats MetaspaceShared::_stats;
-
 bool MetaspaceShared::_has_error_classes;
 bool MetaspaceShared::_archive_loading_failed = false;
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
-SharedMiscRegion MetaspaceShared::_mc;
-SharedMiscRegion MetaspaceShared::_md;
-SharedMiscRegion MetaspaceShared::_od;
+size_t MetaspaceShared::_core_spaces_size = 0;
+
+// The CDS archive is divided into the following regions:
+//     mc - misc code (the method entry trampolines)
+//     rw - read-write metadata
+//     ro - read-only metadata and read-only tables
+//     md - misc data (the c++ vtables)
+//     od - optional data (original class files)
+//
+//     s0 - shared strings #0
+//     s1 - shared strings #1 (may be empty)
+//
+// Except for the s0/s1 regions, the other 5 regions are linearly allocated, starting from
+// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
+// are page-aligned, and there is no gap between any two consecutive regions.
+//
+// These 5 regions are populated in the following steps:
+// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
+//     temporarily allocated outside of the shared regions. Only the method entry
+//     trampolines are written into the mc region.
+// [2] ArchiveCompactor copies RW metadata into the rw region.
+// [3] ArchiveCompactor copies RO metadata into the ro region.
+// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
+//     are copied into the ro region as read-only tables.
+// [5] C++ vtables are copied into the md region.
+// [6] Original class files are copied into the od region.
+//
+// The s0/s1 regions are populated inside MetaspaceShared::dump_string_and_symbols. Their
+// layout is independent of the other 5 regions.
+
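+// Because DumpRegion::pack() aligns a region's end and hands it to the next
+// region as its base, the regions end up back-to-back. A sketch of the
+// expected packing sequence (illustrative; the actual calls happen at
+// various points during dumping):
+//
+//   _mc_region.pack(&_rw_region);  // mc is done; rw starts at mc's aligned end
+//   _rw_region.pack(&_ro_region);
+//   _ro_region.pack(&_md_region);
+//   _md_region.pack(&_od_region);
+//   _od_region.pack();             // od is the last of the 5 regions
+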
+class DumpRegion {
+private:
+  const char* _name;
+  char* _base;
+  char* _top;
+  char* _end;
+  bool _is_packed;
+
+  char* expand_top_to(char* newtop) {
+    assert(is_allocatable(), "must be initialized and not packed");
+    assert(newtop >= _top, "must not grow backwards");
+    if (newtop > _end) {
+      MetaspaceShared::report_out_of_space(_name, newtop - _top);
+      ShouldNotReachHere();
+    }
+    MetaspaceShared::commit_shared_space_to(newtop);
+    _top = newtop;
+    return _top;
+  }
+
+public:
+  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
 
-void SharedMiscRegion::initialize(ReservedSpace rs, size_t committed_byte_size,  SharedSpaceType space_type) {
-  _vs.initialize(rs, committed_byte_size);
-  _alloc_top = _vs.low();
-  _space_type = space_type;
-}
+  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
+    char* p = (char*)align_up(_top, alignment);
+    char* newtop = p + align_up(num_bytes, alignment);
+    expand_top_to(newtop);
+    memset(p, 0, newtop - p);
+    return p;
+  }
+
+  void append_intptr_t(intptr_t n) {
+    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+    intptr_t *p = (intptr_t*)_top;
+    char* newtop = _top + sizeof(intptr_t);
+    expand_top_to(newtop);
+    *p = n;
+  }
+
+  char* base()      const { return _base;        }
+  char* top()       const { return _top;         }
+  char* end()       const { return _end;         }
+  size_t reserved() const { return _end - _base; }
+  size_t used()     const { return _top - _base; }
+  bool is_packed()  const { return _is_packed;   }
+  bool is_allocatable() const {
+    return !is_packed() && _base != NULL;
+  }
 
-// NOT thread-safe, but this is called during dump time in single-threaded mode.
-char* SharedMiscRegion::alloc(size_t num_bytes) {
-  assert(DumpSharedSpaces, "dump time only");
-  size_t alignment = sizeof(char*);
-  num_bytes = align_up(num_bytes, alignment);
-  _alloc_top = align_up(_alloc_top, alignment);
-  if (_alloc_top + num_bytes > _vs.high()) {
-    report_out_of_shared_space(_space_type);
+  double perc(size_t used, size_t total) const {
+    if (total == 0) {
+      total = 1;
+    }
+    return used / double(total) * 100.0;
+  }
+
+  void print(size_t total_bytes) const {
+    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
+                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
+  }
+  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
+    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
+               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
+    if (strcmp(_name, failing_region) == 0) {
+      tty->print_cr(" required = %d", int(needed_bytes));
+    } else {
+      tty->cr();
+    }
+  }
+
+  void init(const ReservedSpace* rs) {
+    _base = _top = rs->base();
+    _end = rs->end();
+  }
+  void init(char* b, char* t, char* e) {
+    _base = b;
+    _top = t;
+    _end = e;
   }
 
-  char* p = _alloc_top;
-  _alloc_top += num_bytes;
+  void pack(DumpRegion* next = NULL) {
+    assert(!is_packed(), "sanity");
+    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
+    _is_packed = true;
+    if (next != NULL) {
+      next->_base = next->_top = this->_end;
+      next->_end = MetaspaceShared::shared_rs()->end();
+    }
+  }
+  bool contains(char* p) {
+    return base() <= p && p < top();
+  }
+};
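+
+// Typical dump-time usage of a DumpRegion (a sketch; the names are illustrative):
+//
+//   char* p = _ro_region.allocate(num_bytes);          // zero-filled, word-aligned
+//   _md_region.append_intptr_t((intptr_t)vtable_word); // one pointer-sized word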
 
-  memset(p, 0, num_bytes);
-  return p;
+DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
+DumpRegion _s0_region("s0"), _s1_region("s1");
+
+char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
+  return _mc_region.allocate(num_bytes);
+}
+
+char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
+  return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_shared_rs(ReservedSpace* rs) {
-  assert(DumpSharedSpaces, "dump time only");
-  _shared_rs = rs;
+void MetaspaceShared::initialize_shared_rs() {
+  const size_t reserve_alignment = Metaspace::reserve_alignment();
+  bool large_pages = false; // No large pages when dumping the CDS archive.
+  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
+
+#ifdef _LP64
+  // On 64-bit VM, the heap and class space layout will be the same as if
+  // you're running in -Xshare:on mode:
+  //
+  //                         +-- SharedBaseAddress (default = 0x800000000)
+  //                         v
+  // +-..---------+----+ ... +----+----+----+----+----+---------------+
+  // |    Heap    | ST |     | MC | RW | RO | MD | OD | class space   |
+  // +-..---------+----+ ... +----+----+----+----+----+---------------+
+  // |<--MaxHeapSize->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
+  //
+  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
+  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
+#else
+  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
+  size_t cds_total = align_down(256*M, reserve_alignment);
+#endif
 
-  size_t core_spaces_size = FileMapInfo::core_spaces_size();
-  size_t metadata_size = SharedReadOnlySize + SharedReadWriteSize;
+  // First try to reserve the space at the specified SharedBaseAddress.
+  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
+  if (_shared_rs.is_reserved()) {
+    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
+  } else {
+    // Get a mmap region anywhere if the SharedBaseAddress fails.
+    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
+  }
+  if (!_shared_rs.is_reserved()) {
+    vm_exit_during_initialization("Unable to reserve memory for shared space",
+                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
+  }
 
-  // Split into the core and optional sections
-  ReservedSpace core_data = _shared_rs->first_part(core_spaces_size);
-  ReservedSpace optional_data = _shared_rs->last_part(core_spaces_size);
+#ifdef _LP64
+  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
+  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
+  //   will store Klasses into this space.
+  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
+  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
+  //   then the RO parts.
+
+  assert(UseCompressedOops && UseCompressedClassPointers,
+      "UseCompressedOops and UseCompressedClassPointers must be set");
+
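+  // E.g., with the default 4GB (UnscaledClassSpaceMax) reservation:
+  // max_archive_size = 3GB (the lower part) and tmp_class_space = 1GB (the
+  // upper part), so CompressedClassSpaceSize comes out to roughly 1GB.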
+  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
+  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
+  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
+  _shared_rs = _shared_rs.first_part(max_archive_size);
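
For concreteness, with the 64-bit defaults: cds_total is 4GB, so max_archive_size =
align_down(4GB * 3 / 4) = 3GB. last_part(3GB) yields the upper 1GB, which becomes the
temporary class space (CompressedClassSpaceSize = 1GB), and first_part(3GB) trims
_shared_rs to the lower 3GB that will hold the archive regions.
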
 
-  // The RO/RW and the misc sections
-  ReservedSpace shared_ro_rw = core_data.first_part(metadata_size);
-  ReservedSpace misc_section = core_data.last_part(metadata_size);
+  // Set up compressed class pointers.
+  Universe::set_narrow_klass_base((address)_shared_rs.base());
+  if (UseAOT || cds_total > UnscaledClassSpaceMax) {
+    // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
+    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  } else {
+    Universe::set_narrow_klass_shift(0);
+  }
+
+  Metaspace::initialize_class_space(tmp_class_space);
+  tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
+
+  tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
+                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
+#endif
+
+  // Start with 0 committed bytes. The memory will be committed as needed by
+  // MetaspaceShared::commit_shared_space_to().
+  if (!_shared_vs.initialize(_shared_rs, 0)) {
+    vm_exit_during_initialization("Unable to allocate memory for shared space");
+  }
 
-  // Now split the misc code and misc data sections.
-  ReservedSpace md_rs   = misc_section.first_part(SharedMiscDataSize);
-  ReservedSpace mc_rs   = misc_section.last_part(SharedMiscDataSize);
+  _mc_region.init(&_shared_rs);
+  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
+                _shared_rs.size(), p2i(_shared_rs.base()));
+}
+
+void MetaspaceShared::commit_shared_space_to(char* newtop) {
+  assert(DumpSharedSpaces, "dump-time only");
+  char* base = _shared_rs.base();
+  size_t need_committed_size = newtop - base;
+  size_t has_committed_size = _shared_vs.committed_size();
+  if (need_committed_size < has_committed_size) {
+    return;
+  }
 
-  _md.initialize(md_rs, SharedMiscDataSize, SharedMiscData);
-  _mc.initialize(mc_rs, SharedMiscCodeSize, SharedMiscCode);
-  _od.initialize(optional_data, metadata_size, SharedOptional);
+  size_t min_bytes = need_committed_size - has_committed_size;
+  size_t preferred_bytes = 1 * M;
+  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
+
+  size_t commit = MAX2(min_bytes, preferred_bytes);
+  assert(commit <= uncommitted, "sanity");
+
+  bool result = _shared_vs.expand_by(commit, false);
+  if (!result) {
+    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
+                                          need_committed_size));
+  }
+
+  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
+                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 }
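
A worked example of the commit policy: if the dump-time allocator needs 40 bytes past
the current high-water mark, min_bytes is 40 but commit = MAX2(40, 1M) = 1MB, so the
virtual space grows in steps of at least 1MB. This amortizes the cost of the underlying
memory-commit calls across many small DumpRegion allocations.
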
 
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
-void MetaspaceShared::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
-                                size_t* space_size) {
+void MetaspaceShared::serialize(SerializeClosure* soc) {
   int tag = 0;
   soc->do_tag(--tag);
 
@@ -150,7 +338,7 @@
 
   // Dump/restore the symbol and string tables
   SymbolTable::serialize(soc);
-  StringTable::serialize(soc, string_space, space_size);
+  StringTable::serialize(soc);
   soc->do_tag(--tag);
 
   soc->do_tag(666);
@@ -159,7 +347,7 @@
 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
   if (DumpSharedSpaces) {
     if (_cds_i2i_entry_code_buffers == NULL) {
-      _cds_i2i_entry_code_buffers = (address)misc_data_space_alloc(total_size);
+      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
       _cds_i2i_entry_code_buffers_size = total_size;
     }
   } else if (UseSharedSpaces) {
@@ -179,7 +367,9 @@
 static GrowableArray<Klass*>* _global_klass_objects;
 class CollectClassesClosure : public KlassClosure {
   void do_klass(Klass* k) {
-    _global_klass_objects->append_if_missing(k);
+    if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
+      _global_klass_objects->append_if_missing(k);
+    }
   }
 };
 
@@ -230,6 +420,23 @@
   }
 }
 
+static void relocate_cached_class_file() {
+  for (int i = 0; i < _global_klass_objects->length(); i++) {
+    Klass* k = _global_klass_objects->at(i);
+    if (k->is_instance_klass()) {
+      InstanceKlass* ik = InstanceKlass::cast(k);
+      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
+      if (p != NULL) {
+        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
+        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
+        q->length = p->length;
+        memcpy(q->data, p->data, p->length);
+        ik->set_archived_class_data(q);
+      }
+    }
+  }
+}
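
The size computation above treats the cached class file data as a fixed header followed
by a flexible byte array. A sketch of the assumed layout (see the JVMTI sources for the
authoritative definition):

    // Assumed layout: offset_of(JvmtiCachedClassFileData, data) is the header size,
    // and 'length' bytes of class file data follow.
    struct JvmtiCachedClassFileData {
      jint length;            // number of bytes in 'data'
      unsigned char data[1];  // actually 'length' bytes long
    };
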
+
 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
 //
@@ -266,8 +473,9 @@
   intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
   void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
   // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
-  intptr_t* next(int vtable_size) {
-    return &_cloned_vtable[vtable_size];
+  static size_t byte_size(int vtable_size) {
+    CppVtableInfo i;
+    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
   }
 };
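
Taking pointer_delta() against a stack dummy, with sizeof(u1) as the unit, yields the
object size in bytes. Assuming _vtable_size is the only field preceding _cloned_vtable,
byte_size(n) works out to (n + 1) * sizeof(intptr_t); e.g., on 64-bit a 5-entry vtable
occupies 48 bytes.
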
 
@@ -281,7 +489,7 @@
 
 public:
   // Allocate and initialize the C++ vtable, starting from top, but do not go past end.
-  static intptr_t* allocate(const char* name, intptr_t* top, intptr_t* end);
+  static intptr_t* allocate(const char* name);
 
   // Clone the vtable to ...
   static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);
@@ -306,18 +514,14 @@
 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
 
 template <class T>
-intptr_t* CppVtableCloner<T>::allocate(const char* name, intptr_t* top, intptr_t* end) {
+intptr_t* CppVtableCloner<T>::allocate(const char* name) {
+  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
   int n = get_vtable_length(name);
-  _info = (CppVtableInfo*)top;
-  intptr_t* next = _info->next(n);
-
-  if (next > end) {
-    report_out_of_shared_space(SharedMiscData);
-  }
+  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
   _info->set_vtable_size(n);
 
   intptr_t* p = clone_vtable(name, _info);
-  assert(p == next, "must be");
+  assert((char*)p == _md_region.top(), "must be");
 
   return p;
 }
@@ -392,7 +596,7 @@
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  top = CppVtableCloner<c>::allocate(#c, top, end);
+  CppVtableCloner<c>::allocate(#c);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
@@ -413,7 +617,7 @@
 }
 
 // Allocate and initialize the C++ vtables, starting from top, but do not go past end.
-intptr_t* MetaspaceShared::allocate_cpp_vtable_clones(intptr_t* top, intptr_t* end) {
+void MetaspaceShared::allocate_cpp_vtable_clones() {
   assert(DumpSharedSpaces, "dump-time only");
   // Layout (each slot is a intptr_t):
   //   [number of slots in the first vtable = n1]
@@ -423,7 +627,6 @@
   //   ...
   // The order of the vtables is the same as the CPP_VTAB_PATCH_TYPES_DO macro.
   CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
-  return top;
 }
 
 // Switch the vtable pointer to point to the cloned vtable. We assume the
@@ -469,27 +672,15 @@
 
 class WriteClosure : public SerializeClosure {
 private:
-  intptr_t* top;
-  char* end;
-
-  inline void check_space() {
-    if ((char*)top + sizeof(intptr_t) > end) {
-      report_out_of_shared_space(SharedMiscData);
-    }
-  }
+  DumpRegion* _dump_region;
 
 public:
-  WriteClosure(char* md_top, char* md_end) {
-    top = (intptr_t*)md_top;
-    end = md_end;
+  WriteClosure(DumpRegion* r) {
+    _dump_region = r;
   }
 
-  char* get_top() { return (char*)top; }
-
   void do_ptr(void** p) {
-    check_space();
-    *top = (intptr_t)*p;
-    ++top;
+    _dump_region->append_intptr_t((intptr_t)*p);
   }
 
   void do_u4(u4* p) {
@@ -498,21 +689,15 @@
   }
 
   void do_tag(int tag) {
-    check_space();
-    *top = (intptr_t)tag;
-    ++top;
+    _dump_region->append_intptr_t((intptr_t)tag);
   }
 
   void do_region(u_char* start, size_t size) {
-    if ((char*)top + size > end) {
-      report_out_of_shared_space(SharedMiscData);
-    }
     assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
     assert(size % sizeof(intptr_t) == 0, "bad size");
     do_tag((int)size);
     while (size > 0) {
-      *top = *(intptr_t*)start;
-      ++top;
+      _dump_region->append_intptr_t(*(intptr_t*)start);
       start += sizeof(intptr_t);
       size -= sizeof(intptr_t);
     }
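
WriteClosure appends raw intptr_t words to its dump region; at run time a matching
ReadClosure (used by initialize_shared_spaces() further below) must consume the same
stream in the same order. A minimal sketch of the read side, assuming a simple _ptr
cursor over the mapped words:

    // Sketch of the symmetric reader; _ptr is an assumed intptr_t* cursor.
    void do_ptr(void** p) {
      *p = (void*)*_ptr++;   // restore a pointer written by WriteClosure::do_ptr()
    }
    void do_tag(int tag) {
      int old_tag = (int)*_ptr++;
      assert(tag == old_tag, "write/read order diverged");
    }
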
@@ -523,7 +708,7 @@
 
 // This is for dumping detailed statistics for the allocations
 // in the shared spaces.
-class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+class DumpAllocStats : public ResourceObj {
 public:
 
   // Here's poor man's enum inheritance
@@ -535,18 +720,15 @@
   f(StringBucket) \
   f(Other)
 
-#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
-#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
-
   enum Type {
     // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
-    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
     _number_of_types
   };
 
   static const char * type_name(Type type) {
     switch(type) {
-    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
     default:
       ShouldNotReachHere();
       return NULL;
@@ -554,62 +736,51 @@
   }
 
 public:
-  enum {
-    RO = 0,
-    RW = 1
-  };
+  enum { RO = 0, RW = 1 };
 
   int _counts[2][_number_of_types];
   int _bytes [2][_number_of_types];
-  int _which;
 
-  DumpAllocClosure() {
+  DumpAllocStats() {
     memset(_counts, 0, sizeof(_counts));
     memset(_bytes,  0, sizeof(_bytes));
   };
 
-  void iterate_metaspace(Metaspace* space, int which) {
-    assert(which == RO || which == RW, "sanity");
-    _which = which;
-    space->iterate(this);
+  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
+    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+    int which = (read_only) ? RO : RW;
+    _counts[which][type] ++;
+    _bytes [which][type] += byte_size;
   }
 
-  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
-    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
-    _counts[_which][type] ++;
-    _bytes [_which][type] += byte_size;
+  void record_other_type(int byte_size, bool read_only) {
+    int which = (read_only) ? RO : RW;
+    _bytes [which][OtherType] += byte_size;
   }
-
-  void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
 };
 
-void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
-  rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
-  int other_bytes = md_all + mc_all;
-
+void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
   // Calculate size of data that was not allocated by Metaspace::allocate()
   MetaspaceSharedStats *stats = MetaspaceShared::stats();
 
   // symbols
   _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
   _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
-  _bytes [RO][TypeArrayU4Type]    -= stats->symbol.hashentry_bytes;
 
   _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
   _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
-  _bytes [RO][TypeArrayU4Type] -= stats->symbol.bucket_bytes;
 
   // strings
   _counts[RO][StringHashentryType] = stats->string.hashentry_count;
   _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
-  _bytes [RO][TypeArrayU4Type]    -= stats->string.hashentry_bytes;
 
   _counts[RO][StringBucketType] = stats->string.bucket_count;
   _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
-  _bytes [RO][TypeArrayU4Type] -= stats->string.bucket_bytes;
 
   // TODO: count things like dictionary, vtable, etc
-  _bytes[RW][OtherType] =  other_bytes;
+  _bytes[RW][OtherType] += mc_all + md_all;
+  rw_all += mc_all + md_all; // mc/md are mapped Read/Write
 
   // prevent divide-by-zero
   if (ro_all < 1) {
@@ -633,7 +804,7 @@
   LogMessage(cds) msg;
   stringStream info_stream;
 
-  info_stream.print_cr("Detailed metadata info (rw includes md and mc):");
+  info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
   info_stream.print_cr("%s", hdr);
   info_stream.print_cr("%s", sep);
   for (int type = 0; type < int(_number_of_types); type ++) {
@@ -684,33 +855,317 @@
 
 class VM_PopulateDumpSharedSpace: public VM_Operation {
 private:
-  ClassLoaderData* _loader_data;
-  GrowableArray<Klass*> *_class_promote_order;
-  VirtualSpace _md_vs;
-  VirtualSpace _mc_vs;
-  VirtualSpace _od_vs;
   GrowableArray<MemRegion> *_string_regions;
 
+  void dump_string_and_symbols();
+  char* dump_read_only_tables();
+  void print_region_stats();
 public:
-  VM_PopulateDumpSharedSpace(ClassLoaderData* loader_data,
-                             GrowableArray<Klass*> *class_promote_order) :
-    _loader_data(loader_data) {
-    _class_promote_order = class_promote_order;
-  }
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
   void doit();   // outline because gdb sucks
+  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only,  bool allow_exec);
+}; // class VM_PopulateDumpSharedSpace
+
+class SortedSymbolClosure: public SymbolClosure {
+  GrowableArray<Symbol*> _symbols;
+  virtual void do_symbol(Symbol** sym) {
+    assert((*sym)->is_permanent(), "archived symbols must be permanent");
+    _symbols.append(*sym);
+  }
+  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
+    if (a[0] < b[0]) {
+      return -1;
+    } else if (a[0] == b[0]) {
+      return 0;
+    } else {
+      return 1;
+    }
+  }
+
+public:
+  SortedSymbolClosure() {
+    SymbolTable::symbols_do(this);
+    _symbols.sort(compare_symbols_by_address);
+  }
+  GrowableArray<Symbol*>* get_sorted_symbols() {
+    return &_symbols;
+  }
+};
+
+// ArchiveCompactor --
+//
+// This class is the central piece of shared archive compaction -- all metaspace data are
+// initially allocated outside of the shared regions. ArchiveCompactor copies the
+// metaspace data into their final location in the shared regions.
+
+class ArchiveCompactor : AllStatic {
+  static DumpAllocStats* _alloc_stats;
+  static SortedSymbolClosure* _ssc;
+
+  static unsigned my_hash(const address& a) {
+    return primitive_hash<address>(a);
+  }
+  static bool my_equals(const address& a0, const address& a1) {
+    return primitive_equals<address>(a0, a1);
+  }
+  typedef ResourceHashtable<
+      address, address,
+      ArchiveCompactor::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
+      ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
+      16384, ResourceObj::C_HEAP> RelocationTable;
+  static RelocationTable* _new_loc_table;
+
+public:
+  static void initialize() {
+    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
+    _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
+  }
+  static DumpAllocStats* alloc_stats() {
+    return _alloc_stats;
+  }
+
+  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
+    address obj = ref->obj();
+    int bytes = ref->size() * BytesPerWord;
+    char* p;
+    size_t alignment = BytesPerWord;
+    char* oldtop;
+    char* newtop;
+
+    if (read_only) {
+      oldtop = _ro_region.top();
+      p = _ro_region.allocate(bytes, alignment);
+      newtop = _ro_region.top();
+    } else {
+      oldtop = _rw_region.top();
+      p = _rw_region.allocate(bytes, alignment);
+      newtop = _rw_region.top();
+    }
+    memcpy(p, obj, bytes);
+    bool isnew = _new_loc_table->put(obj, (address)p);
+    assert(isnew, "must be");
+    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
+
+    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
+    if (ref->msotype() == MetaspaceObj::SymbolType) {
+      uintx delta = MetaspaceShared::object_delta(p);
+      if (delta > MAX_SHARED_DELTA) {
+        // This is just a sanity check and should not trigger in any real-world usage. It
+        // can happen only if you allocate more than 2GB of Symbols, which would require
+        // millions of shared classes.
+        vm_exit_during_initialization("Too many Symbols in the CDS archive",
+                                      "Please reduce the number of shared classes.");
+      }
+    }
+  }
+
+  static address get_new_loc(MetaspaceClosure::Ref* ref) {
+    address* pp = _new_loc_table->get(ref->obj());
+    assert(pp != NULL, "must be");
+    return *pp;
+  }
 
 private:
-  void handle_misc_data_space_failure(bool success) {
-    if (!success) {
-      report_out_of_shared_space(SharedMiscData);
+  // Makes a shallow copy of visited MetaspaceObj's
+  class ShallowCopier: public UniqueMetaspaceClosure {
+    bool _read_only;
+  public:
+    ShallowCopier(bool read_only) : _read_only(read_only) {}
+
+    virtual void do_unique_ref(Ref* ref, bool read_only) {
+      if (read_only == _read_only) {
+        allocate(ref, read_only);
+      }
+    }
+  };
+
+  // Relocate embedded pointers within a MetaspaceObj's shallow copy
+  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
+  public:
+    virtual void do_unique_ref(Ref* ref, bool read_only) {
+      address new_loc = get_new_loc(ref);
+      RefRelocator refer;
+      ref->metaspace_pointers_do_at(&refer, new_loc);
+    }
+  };
+
+  // Relocate a reference to point to its shallow copy
+  class RefRelocator: public MetaspaceClosure {
+  public:
+    virtual bool do_ref(Ref* ref, bool read_only) {
+      if (ref->not_null()) {
+        ref->update(get_new_loc(ref));
+      }
+      return false; // Do not recurse.
+    }
+  };
+
+#ifdef ASSERT
+  class IsRefInArchiveChecker: public MetaspaceClosure {
+  public:
+    virtual bool do_ref(Ref* ref, bool read_only) {
+      if (ref->not_null()) {
+        char* obj = (char*)ref->obj();
+        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
+               "must be relocated to point to CDS archive");
+      }
+      return false; // Do not recurse.
+    }
+  };
+#endif
+
+public:
+  static void copy_and_compact() {
+    // We should no longer allocate anything from the metaspace, so that
+    // we can have a stable set of MetaspaceObjs to work with.
+    Metaspace::freeze();
+
+    ResourceMark rm;
+    SortedSymbolClosure the_ssc; // StackObj
+    _ssc = &the_ssc;
+
+    tty->print_cr("Scanning all metaspace objects ... ");
+    {
+      // allocate and shallow-copy RW objects, immediately following the MC region
+      tty->print_cr("Allocating RW objects ... ");
+      _mc_region.pack(&_rw_region);
+
+      ResourceMark rm;
+      ShallowCopier rw_copier(false);
+      iterate_roots(&rw_copier);
+    }
+    {
+      // allocate and shallow-copy RO objects, immediately following the RW region
+      tty->print_cr("Allocating RO objects ... ");
+      _rw_region.pack(&_ro_region);
+
+      ResourceMark rm;
+      ShallowCopier ro_copier(true);
+      iterate_roots(&ro_copier);
+    }
+    {
+      tty->print_cr("Relocating embedded pointers ... ");
+      ResourceMark rm;
+      ShallowCopyEmbeddedRefRelocator emb_reloc;
+      iterate_roots(&emb_reloc);
+    }
+    {
+      tty->print_cr("Relocating external roots ... ");
+      ResourceMark rm;
+      RefRelocator ext_reloc;
+      iterate_roots(&ext_reloc);
+    }
+
+#ifdef ASSERT
+    {
+      tty->print_cr("Verifying external roots ... ");
+      ResourceMark rm;
+      IsRefInArchiveChecker checker;
+      iterate_roots(&checker);
     }
+#endif
+
+    // cleanup
+    _ssc = NULL;
   }
-}; // class VM_PopulateDumpSharedSpace
+
+  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
+  // strings in dump_string_and_symbols(): during the string copy, we operate on the old
+  // String objects, which assert that their klass is the old
+  // SystemDictionary::String_klass().
+  static void relocate_well_known_klasses() {
+    {
+      tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
+      ResourceMark rm;
+      RefRelocator ext_reloc;
+      SystemDictionary::well_known_klasses_do(&ext_reloc);
+    }
+    // NOTE: after this point, we shouldn't have any globals that can reach the old
+    // objects.
+
+    // We cannot use any of the objects in the heap anymore (except for the objects
+    // in the CDS shared string regions) because their headers no longer point to
+    // valid Klasses.
+  }
+
+  static void iterate_roots(MetaspaceClosure* it) {
+    GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
+    for (int i=0; i<symbols->length(); i++) {
+      it->push(symbols->adr_at(i));
+    }
+    if (_global_klass_objects != NULL) {
+      // Need to fix up the pointers
+      for (int i = 0; i < _global_klass_objects->length(); i++) {
+        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
+        it->push(_global_klass_objects->adr_at(i));
+      }
+    }
+    FileMapInfo::metaspace_pointers_do(it);
+    SystemDictionary::classes_do(it);
+    Universe::metaspace_pointers_do(it);
+    SymbolTable::metaspace_pointers_do(it);
+    vmSymbols::metaspace_pointers_do(it);
+  }
+
+  static Klass* get_relocated_klass(Klass* orig_klass) {
+    address* pp = _new_loc_table->get((address)orig_klass);
+    assert(pp != NULL, "must be");
+    Klass* klass = (Klass*)(*pp);
+    assert(klass->is_klass(), "must be");
+    return klass;
+  }
+};
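
At its core the compactor is driven by a single map: _new_loc_table takes an object's
old (dump-time) address to its new address inside the rw/ro regions. The closures are
different traversals over that map; schematically:

    // Pass 1 (ShallowCopier):  _new_loc_table->put(old_addr, new_addr) per object.
    // Pass 2 (the relocators):
    //   address* pp = _new_loc_table->get(old_addr);  // must already exist
    //   ref->update(*pp);                             // patch the pointer in place
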
+
+DumpAllocStats* ArchiveCompactor::_alloc_stats;
+SortedSymbolClosure* ArchiveCompactor::_ssc;
+ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
+
+void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
+                                              DumpRegion* dump_region, bool read_only,  bool allow_exec) {
+  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
+}
+
+void VM_PopulateDumpSharedSpace::dump_string_and_symbols() {
+  tty->print_cr("Dumping string and symbol tables ...");
+
+  NOT_PRODUCT(SymbolTable::verify());
+  NOT_PRODUCT(StringTable::verify());
+  SymbolTable::write_to_archive();
+
+  // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
+  _string_regions = new GrowableArray<MemRegion>(2);
+  StringTable::write_to_archive(_string_regions);
+}
+
+char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
+  char* oldtop = _ro_region.top();
+  // Reorder the system dictionary. Moving the symbols affects
+  // how the hash table indices are calculated.
+  SystemDictionary::reorder_dictionary_for_sharing();
+  NOT_PRODUCT(SystemDictionary::verify();)
+
+  size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
+  char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
+  SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
+
+  size_t table_bytes = SystemDictionary::count_bytes_for_table();
+  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
+  SystemDictionary::copy_table(table_top, _ro_region.top());
+
+  // Write the other data to the output array.
+  WriteClosure wc(&_ro_region);
+  MetaspaceShared::serialize(&wc);
+
+  char* newtop = _ro_region.top();
+  ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
+  return buckets_top;
+}
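
The write order here pins down the read order at run time: initialize_shared_spaces()
(further below) starts at mapinfo->read_only_tables_start() and consumes, in sequence,
the dictionary buckets, the dictionary table entries, and finally the stream produced by
WriteClosure. Schematically:

    // [shared dictionary buckets]        <- read_only_tables_start()
    // [shared dictionary table entries]  (HashtableEntry objects)
    // [serialized stream: tags, pointers, symbol/string tables]
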
 
 void VM_PopulateDumpSharedSpace::doit() {
   Thread* THREAD = VMThread::vm_thread();
+
   NOT_PRODUCT(SystemDictionary::verify();)
   // The following guarantee is meant to ensure that no loader constraints
   // exist yet, since the constraints table is not shared.  This becomes
@@ -762,97 +1217,28 @@
   remove_unshareable_in_classes();
   tty->print_cr("done. ");
 
-  // Set up the misc data, misc code and optional data segments.
-  _md_vs = *MetaspaceShared::misc_data_region()->virtual_space();
-  _mc_vs = *MetaspaceShared::misc_code_region()->virtual_space();
-  _od_vs = *MetaspaceShared::optional_data_region()->virtual_space();
-  char* md_low = _md_vs.low();
-  char* md_top = MetaspaceShared::misc_data_region()->alloc_top();
-  char* md_end = _md_vs.high();
-  char* mc_low = _mc_vs.low();
-  char* mc_top = MetaspaceShared::misc_code_region()->alloc_top();
-  char* mc_end = _mc_vs.high();
-  char* od_low = _od_vs.low();
-  char* od_top = MetaspaceShared::optional_data_region()->alloc_top();
-  char* od_end = _od_vs.high();
-
-  char* vtbl_list = md_top;
-  md_top = (char*)MetaspaceShared::allocate_cpp_vtable_clones((intptr_t*)md_top, (intptr_t*)md_end);
-
-  // We don't use MC section anymore. We will remove it in a future RFE. For now, put one
-  // byte inside so the region writing/mapping code works.
-  mc_top ++;
+  ArchiveCompactor::initialize();
+  ArchiveCompactor::copy_and_compact();
 
-  // Reorder the system dictionary.  (Moving the symbols affects
-  // how the hash table indices are calculated.)
-  // Not doing this either.
-
-  SystemDictionary::reorder_dictionary();
-  NOT_PRODUCT(SystemDictionary::verify();)
-  SystemDictionary::copy_buckets(&md_top, md_end);
-
-  SystemDictionary::copy_table(&md_top, md_end);
+  dump_string_and_symbols();
+  ArchiveCompactor::relocate_well_known_klasses();
 
-  // Write the other data to the output array.
-  // SymbolTable, StringTable and extra information for system dictionary
-  NOT_PRODUCT(SymbolTable::verify());
-  NOT_PRODUCT(StringTable::verify());
-  size_t ss_bytes = 0;
-  char* ss_low;
-  // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
-  _string_regions = new GrowableArray<MemRegion>(2);
-
-  WriteClosure wc(md_top, md_end);
-  MetaspaceShared::serialize(&wc, _string_regions, &ss_bytes);
-  md_top = wc.get_top();
-  ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
+  char* read_only_tables_start = dump_read_only_tables();
+  _ro_region.pack(&_md_region);
 
-  // Print shared spaces all the time
-  Metaspace* ro_space = _loader_data->ro_metaspace();
-  Metaspace* rw_space = _loader_data->rw_metaspace();
-
-  // Allocated size of each space (may not be all occupied)
-  const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
-  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
-  const size_t md_alloced = md_end-md_low;
-  const size_t mc_alloced = mc_end-mc_low;
-  const size_t od_alloced = od_end-od_low;
-  const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced
-                             + ss_bytes + od_alloced;
-
-  // Occupied size of each space.
-  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
-  const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
-  const size_t md_bytes = size_t(md_top - md_low);
-  const size_t mc_bytes = size_t(mc_top - mc_low);
-  const size_t od_bytes = size_t(od_top - od_low);
+  char* vtbl_list = _md_region.top();
+  MetaspaceShared::allocate_cpp_vtable_clones();
+  _md_region.pack(&_od_region);
 
-  // Percent of total size
-  const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes + ss_bytes + od_bytes;
-  const double ro_t_perc = ro_bytes / double(total_bytes) * 100.0;
-  const double rw_t_perc = rw_bytes / double(total_bytes) * 100.0;
-  const double md_t_perc = md_bytes / double(total_bytes) * 100.0;
-  const double mc_t_perc = mc_bytes / double(total_bytes) * 100.0;
-  const double ss_t_perc = ss_bytes / double(total_bytes) * 100.0;
-  const double od_t_perc = od_bytes / double(total_bytes) * 100.0;
+  // Relocate the archived class file data into the od region
+  relocate_cached_class_file();
+  _od_region.pack();
 
-  // Percent of fullness of each space
-  const double ro_u_perc = ro_bytes / double(ro_alloced) * 100.0;
-  const double rw_u_perc = rw_bytes / double(rw_alloced) * 100.0;
-  const double md_u_perc = md_bytes / double(md_alloced) * 100.0;
-  const double mc_u_perc = mc_bytes / double(mc_alloced) * 100.0;
-  const double od_u_perc = od_bytes / double(od_alloced) * 100.0;
-  const double total_u_perc = total_bytes / double(total_alloced) * 100.0;
-
-#define fmt_space "%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT
-  tty->print_cr(fmt_space, "ro", ro_bytes, ro_t_perc, ro_alloced, ro_u_perc, p2i(ro_space->bottom()));
-  tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, p2i(rw_space->bottom()));
-  tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, p2i(md_low));
-  tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, p2i(mc_low));
-  tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes,   100.0,     p2i(ss_low));
-  tty->print_cr(fmt_space, "od", od_bytes, od_t_perc, od_alloced, od_u_perc, p2i(od_low));
-  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
-                 total_bytes, total_alloced, total_u_perc);
+  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total size
+  // is just the span between the two ends.
+  size_t core_spaces_size = _od_region.end() - _mc_region.base();
+  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
+         "should already be aligned");
 
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
@@ -865,10 +1251,15 @@
   // Create and write the archive file that maps the shared spaces.
 
   FileMapInfo* mapinfo = new FileMapInfo();
-  mapinfo->populate_header(MetaspaceShared::max_alignment());
+  mapinfo->populate_header(os::vm_allocation_granularity());
+  mapinfo->set_read_only_tables_start(read_only_tables_start);
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
+  mapinfo->set_core_spaces_size(core_spaces_size);
+
+  char* s0_start, *s0_top, *s0_end;
+  char* s1_start, *s1_top, *s1_end;
 
   for (int pass=1; pass<=2; pass++) {
     if (pass == 1) {
@@ -882,21 +1273,18 @@
       mapinfo->set_header_crc(mapinfo->compute_header_crc());
     }
     mapinfo->write_header();
-    mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
-    mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
-    mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
-                          pointer_delta(md_top, _md_vs.low(), sizeof(char)),
-                          SharedMiscDataSize,
-                          false, true);
-    mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
-                          pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
-                          SharedMiscCodeSize,
-                          true, true);
-    mapinfo->write_string_regions(_string_regions);
-    mapinfo->write_region(MetaspaceShared::od, _od_vs.low(),
-                          pointer_delta(od_top, _od_vs.low(), sizeof(char)),
-                          pointer_delta(od_end, _od_vs.low(), sizeof(char)),
-                          true, false);
+
+    // NOTE: mc contains the trampoline code for method entries, and md contains the cloned
+    // C++ vtables; both are patched at run time, so they need to be mapped read/write.
+    write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
+    write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
+    write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
+    write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
+    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
+
+    mapinfo->write_string_regions(_string_regions,
+                                  &s0_start, &s0_top, &s0_end,
+                                  &s1_start, &s1_top, &s1_end);
   }
 
   mapinfo->close();
@@ -904,14 +1292,47 @@
   // Restore the vtable in case we invoke any virtual methods.
   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
 
+  _s0_region.init(s0_start, s0_top, s0_end);
+  _s1_region.init(s1_start, s1_top, s1_end);
+  print_region_stats();
+
   if (log_is_enabled(Info, cds)) {
-    DumpAllocClosure dac;
-    dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO);
-    dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW);
+    ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
+                                                 int(_mc_region.used()), int(_md_region.used()));
+  }
+}
+
+void VM_PopulateDumpSharedSpace::print_region_stats() {
+  // Print statistics of all the regions
+  const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
+                                _mc_region.reserved() + _md_region.reserved() +
+                                _od_region.reserved() +
+                                _s0_region.reserved() + _s1_region.reserved();
+  const size_t total_bytes = _ro_region.used() + _rw_region.used() +
+                             _mc_region.used() + _md_region.used() +
+                             _od_region.used() +
+                             _s0_region.used() + _s1_region.used();
+  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
 
-    dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes));
-  }
-#undef fmt_space
+  _mc_region.print(total_reserved);
+  _rw_region.print(total_reserved);
+  _ro_region.print(total_reserved);
+  _md_region.print(total_reserved);
+  _od_region.print(total_reserved);
+  _s0_region.print(total_reserved);
+  _s1_region.print(total_reserved);
+
+  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+                 total_bytes, total_reserved, total_u_perc);
+}
+
+// Update a Java object so that its Klass* points to the new location after
+// the shared archive has been compacted.
+void MetaspaceShared::relocate_klass_ptr(oop o) {
+  assert(DumpSharedSpaces, "sanity");
+  Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
+  o->set_klass(k);
 }
 
 class LinkSharedClassesClosure : public KlassClosure {
@@ -1007,11 +1428,6 @@
   { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
     ResourceMark rm;
     char class_list_path_str[JVM_MAXPATHLEN];
-
-    tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
-                  MetaspaceShared::shared_rs()->size(),
-                  p2i(MetaspaceShared::shared_rs()->base()));
-
     // Preload classes to be shared.
     // Should use some os:: method rather than fopen() here. aB.
     const char* class_list_path;
@@ -1045,29 +1461,11 @@
       class_list_path = SharedClassListFile;
     }
 
-    int class_count = 0;
-    GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
-
-    // sun.io.Converters
-    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
-    SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
-
-    // java.util.HashMap
-    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
-    SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
-
-    // Need to allocate the op here:
-    // op.misc_data_space_alloc() will be called during preload_and_dump().
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-    VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
-
     tty->print_cr("Loading classes to share ...");
     _has_error_classes = false;
-    class_count += preload_and_dump(class_list_path, class_promote_order,
-                                    THREAD);
+    int class_count = preload_classes(class_list_path, THREAD);
     if (ExtraSharedClassListFile) {
-      class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order,
-                                      THREAD);
+      class_count += preload_classes(ExtraSharedClassListFile, THREAD);
     }
     tty->print_cr("Loading classes to share: done.");
 
@@ -1083,6 +1481,7 @@
     link_and_cleanup_shared_classes(CATCH);
     tty->print_cr("Rewriting and linking classes: done");
 
+    VM_PopulateDumpSharedSpace op;
     VMThread::execute(&op);
   }
 
@@ -1096,9 +1495,7 @@
 }
 
 
-int MetaspaceShared::preload_and_dump(const char* class_list_path,
-                                      GrowableArray<Klass*>* class_promote_order,
-                                      TRAPS) {
+int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
   ClassListParser parser(class_list_path);
   int class_count = 0;
 
@@ -1114,9 +1511,6 @@
 
         InstanceKlass* ik = InstanceKlass::cast(klass);
 
-        // Should be class load order as per -Xlog:class+preorder
-        class_promote_order->append(ik);
-
         // Link the class to cause the bytecodes to be rewritten and the
         // cpcache to be created. The linking is done as soon as classes
         // are loaded in order that the related data structures (klass and
@@ -1221,6 +1615,13 @@
   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
 }
 
+bool MetaspaceShared::is_in_trampoline_frame(address addr) {
+  if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
+    return true;
+  }
+  return false;
+}
+
 void MetaspaceShared::print_shared_spaces() {
   if (UseSharedSpaces) {
     FileMapInfo::current_info()->print_shared_spaces();
@@ -1244,22 +1645,22 @@
 
   char* _ro_base = NULL;
   char* _rw_base = NULL;
+  char* _mc_base = NULL;
   char* _md_base = NULL;
-  char* _mc_base = NULL;
   char* _od_base = NULL;
 
   // Map each shared region
-  if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
-      mapinfo->verify_region_checksum(ro) &&
+  if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
+      mapinfo->verify_region_checksum(mc) &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
       mapinfo->verify_region_checksum(rw) &&
+      (_ro_base = mapinfo->map_region(ro)) != NULL &&
+      mapinfo->verify_region_checksum(ro) &&
       (_md_base = mapinfo->map_region(md)) != NULL &&
       mapinfo->verify_region_checksum(md) &&
-      (_mc_base = mapinfo->map_region(mc)) != NULL &&
-      mapinfo->verify_region_checksum(mc) &&
       (_od_base = mapinfo->map_region(od)) != NULL &&
       mapinfo->verify_region_checksum(od) &&
-      (image_alignment == (size_t)max_alignment()) &&
+      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
       mapinfo->validate_classpath_entry_table()) {
     // Success (no need to do anything)
     return true;
@@ -1268,8 +1669,8 @@
     // that succeeded
     if (_ro_base != NULL) mapinfo->unmap_region(ro);
     if (_rw_base != NULL) mapinfo->unmap_region(rw);
+    if (_mc_base != NULL) mapinfo->unmap_region(mc);
     if (_md_base != NULL) mapinfo->unmap_region(md);
-    if (_mc_base != NULL) mapinfo->unmap_region(mc);
     if (_od_base != NULL) mapinfo->unmap_region(od);
 #ifndef _WINDOWS
     // Release the entire mapped region
@@ -1293,10 +1694,12 @@
   FileMapInfo *mapinfo = FileMapInfo::current_info();
   _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
   _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
+  _core_spaces_size = mapinfo->core_spaces_size();
   char* buffer = mapinfo->misc_data_patching_start();
+  clone_cpp_vtables((intptr_t*)buffer);
 
-  buffer = (char*)clone_cpp_vtables((intptr_t*)buffer);
-
+  // The rest of the data is now stored in the RO region
+  buffer = mapinfo->read_only_tables_start();
   int sharedDictionaryLen = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
   int number_of_entries = *(intptr_t*)buffer;
@@ -1306,9 +1709,8 @@
                                           number_of_entries);
   buffer += sharedDictionaryLen;
 
-  // The following data in the shared misc data region are the linked
-  // list elements (HashtableEntry objects) for the shared dictionary
-  // table.
+  // The following data are the linked list elements
+  // (HashtableEntry objects) for the shared dictionary table.
 
   int len = *(intptr_t*)buffer;     // skip over shared dictionary entries
   buffer += sizeof(intptr_t);
@@ -1318,7 +1720,7 @@
   // shared string/symbol tables
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
-  serialize(&rc, NULL, NULL);
+  serialize(&rc);
 
   // Initialize the run-time symbol table.
   SymbolTable::create_table();
@@ -1361,48 +1763,16 @@
   return true;
 }
 
-int MetaspaceShared::count_class(const char* classlist_file) {
-  if (classlist_file == NULL) {
-    return 0;
-  }
-  char class_name[256];
-  int class_count = 0;
-  FILE* file = fopen(classlist_file, "r");
-  if (file != NULL) {
-    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
-      if (*class_name == '#') { // comment
-        continue;
-      }
-      class_count++;
-    }
-    fclose(file);
-  } else {
-    char errmsg[JVM_MAXPATHLEN];
-    os::lasterror(errmsg, JVM_MAXPATHLEN);
-    tty->print_cr("Loading classlist failed: %s", errmsg);
-    exit(1);
-  }
+void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
+  // This is highly unlikely to happen on 64-bit because we have reserved 4GB of space.
+  // On 32-bit we reserve only 256MB, so you could run out of space with 100,000 classes
+  // or so.
+  _mc_region.print_out_of_space_msg(name, needed_bytes);
+  _rw_region.print_out_of_space_msg(name, needed_bytes);
+  _ro_region.print_out_of_space_msg(name, needed_bytes);
+  _md_region.print_out_of_space_msg(name, needed_bytes);
+  _od_region.print_out_of_space_msg(name, needed_bytes);
 
-  return class_count;
+  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
+                                "Please reduce the number of shared classes.");
 }
-
-// the sizes are good for typical large applications that have a lot of shared
-// classes
-void MetaspaceShared::estimate_regions_size() {
-  int class_count = count_class(SharedClassListFile);
-  class_count += count_class(ExtraSharedClassListFile);
-
-  if (class_count > LargeThresholdClassCount) {
-    if (class_count < HugeThresholdClassCount) {
-      SET_ESTIMATED_SIZE(Large, ReadOnly);
-      SET_ESTIMATED_SIZE(Large, ReadWrite);
-      SET_ESTIMATED_SIZE(Large, MiscData);
-      SET_ESTIMATED_SIZE(Large, MiscCode);
-    } else {
-      SET_ESTIMATED_SIZE(Huge,  ReadOnly);
-      SET_ESTIMATED_SIZE(Huge,  ReadWrite);
-      SET_ESTIMATED_SIZE(Huge,  MiscData);
-      SET_ESTIMATED_SIZE(Huge,  MiscCode);
-    }
-  }
-}
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -32,45 +32,7 @@
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 
-#define DEFAULT_SHARED_READ_WRITE_SIZE  (NOT_LP64(6*M) LP64_ONLY(10*M))
-#define MIN_SHARED_READ_WRITE_SIZE      (NOT_LP64(6*M) LP64_ONLY(10*M))
-
-#define DEFAULT_SHARED_READ_ONLY_SIZE   (NOT_LP64(8*M) LP64_ONLY(13*M))
-#define MIN_SHARED_READ_ONLY_SIZE       (NOT_LP64(8*M) LP64_ONLY(13*M))
-
-// the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on
-// the sizes required for dumping the archive using the default classlist. The sizes
-// are multiplied by 1.5 for a safety margin.
-
-#define DEFAULT_SHARED_MISC_DATA_SIZE   (NOT_LP64(2*M) LP64_ONLY(4*M))
-#define MIN_SHARED_MISC_DATA_SIZE       (NOT_LP64(1*M) LP64_ONLY(1200*K))
-
-#define DEFAULT_SHARED_MISC_CODE_SIZE   (120*K)
-#define MIN_SHARED_MISC_CODE_SIZE       (NOT_LP64(63*K) LP64_ONLY(69*K))
-#define DEFAULT_COMBINED_SIZE           (DEFAULT_SHARED_READ_WRITE_SIZE+DEFAULT_SHARED_READ_ONLY_SIZE+DEFAULT_SHARED_MISC_DATA_SIZE+DEFAULT_SHARED_MISC_CODE_SIZE)
-
-// the max size is the MAX size (ie. 0x7FFFFFFF) - the total size of
-// the other 3 sections - page size (to avoid overflow in case the final
-// size will get aligned up on page size)
-#define SHARED_PAGE                     ((size_t)os::vm_page_size())
 #define MAX_SHARED_DELTA                (0x7FFFFFFF)
-#define MAX_SHARED_READ_WRITE_SIZE      (MAX_SHARED_DELTA-(MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_DATA_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
-#define MAX_SHARED_READ_ONLY_SIZE       (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_MISC_DATA_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
-#define MAX_SHARED_MISC_DATA_SIZE       (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
-#define MAX_SHARED_MISC_CODE_SIZE       (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_DATA_SIZE)-SHARED_PAGE)
-
-#define LargeSharedArchiveSize          (300*M)
-#define HugeSharedArchiveSize           (800*M)
-#define ReadOnlyRegionPercentage        0.52
-#define ReadWriteRegionPercentage       0.43
-#define MiscDataRegionPercentage        0.03
-#define MiscCodeRegionPercentage        0.02
-#define LargeThresholdClassCount        5000
-#define HugeThresholdClassCount         40000
-
-#define SET_ESTIMATED_SIZE(type, region)                              \
-  Shared ##region## Size  = FLAG_IS_DEFAULT(Shared ##region## Size) ? \
-    (uintx)(type ## SharedArchiveSize *  region ## RegionPercentage) : Shared ## region ## Size
 
 class FileMapInfo;
 
@@ -83,31 +45,12 @@
   CompactHashtableStats string;
 };
 
-class SharedMiscRegion VALUE_OBJ_CLASS_SPEC {
-private:
-  VirtualSpace _vs;
-  char* _alloc_top;
-  SharedSpaceType _space_type;
-
-public:
-  void initialize(ReservedSpace rs, size_t committed_byte_size,  SharedSpaceType space_type);
-  VirtualSpace* virtual_space() {
-    return &_vs;
-  }
-  char* low() const {
-    return _vs.low();
-  }
-  char* alloc_top() const {
-    return _alloc_top;
-  }
-  char* alloc(size_t num_bytes) NOT_CDS_RETURN_(NULL);
-};
-
 // Class Data Sharing Support
 class MetaspaceShared : AllStatic {
 
   // CDS support
-  static ReservedSpace* _shared_rs;
+  static ReservedSpace _shared_rs;
+  static VirtualSpace _shared_vs;
   static int _max_alignment;
   static MetaspaceSharedStats _stats;
   static bool _has_error_classes;
@@ -115,49 +58,46 @@
   static bool _remapped_readwrite;
   static address _cds_i2i_entry_code_buffers;
   static size_t  _cds_i2i_entry_code_buffers_size;
-
-  // Used only during dumping.
-  static SharedMiscRegion _md;
-  static SharedMiscRegion _mc;
-  static SharedMiscRegion _od;
+  static size_t  _core_spaces_size;
  public:
   enum {
-    ro = 0,  // read-only shared space in the heap
+    mc = 0,  // miscellaneous code for method trampolines
     rw = 1,  // read-write shared space in the heap
-    md = 2,  // miscellaneous data for initializing tables, etc.
-    mc = 3,  // miscellaneous code - vtable replacement.
+    ro = 2,  // read-only shared space in the heap
+    md = 3,  // miscellaneous data for initializing tables, etc.
     max_strings = 2, // max number of string regions in string space
     num_non_strings = 4, // number of non-string regions
     first_string = num_non_strings, // index of first string region
     // The optional data region is the last region.
     // Currently it only contains class file data.
     od = max_strings + num_non_strings,
+    last_valid_region = od,
     n_regions = od + 1 // total number of regions
   };
 
-  // Accessor functions to save shared space created for metadata, which has
-  // extra space allocated at the end for miscellaneous data and code.
-  static void set_max_alignment(int alignment) {
-    CDS_ONLY(_max_alignment = alignment);
-  }
-
-  static int max_alignment() {
-    CDS_ONLY(return _max_alignment);
-    NOT_CDS(return 0);
-  }
-
   static void prepare_for_dumping() NOT_CDS_RETURN;
   static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
-  static int preload_and_dump(const char * class_list_path,
-                              GrowableArray<Klass*>* class_promote_order,
-                              TRAPS) NOT_CDS_RETURN_(0);
+  static int preload_classes(const char * class_list_path,
+                             TRAPS) NOT_CDS_RETURN_(0);
 
   static ReservedSpace* shared_rs() {
-    CDS_ONLY(return _shared_rs);
+    CDS_ONLY(return &_shared_rs);
     NOT_CDS(return NULL);
   }
+  static void commit_shared_space_to(char* newtop) NOT_CDS_RETURN;
+  static size_t core_spaces_size() {
+    return _core_spaces_size;
+  }
+  static void initialize_shared_rs() NOT_CDS_RETURN;
 
-  static void initialize_shared_rs(ReservedSpace* rs) NOT_CDS_RETURN;
+  // Delta of this object from the bottom of the archive.
+  static uintx object_delta(void* obj) {
+    assert(DumpSharedSpaces, "supported only for dumping");
+    assert(shared_rs()->contains(obj), "must be");
+    address base_address = address(shared_rs()->base());
+    uintx delta = address(obj) - base_address;
+    return delta;
+  }
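
object_delta() feeds the MAX_SHARED_DELTA check in ArchiveCompactor::allocate(): since
the whole archive sits in one contiguous reservation, any archived object can be
identified by a 31-bit offset from the base. A worked example, assuming the default
SharedBaseAddress:

    //   base  = 0x800000000                          (default SharedBaseAddress)
    //   obj   = 0x800012340
    //   delta = 0x800012340 - 0x800000000 = 0x12340  (well under MAX_SHARED_DELTA)
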
 
   static void set_archive_loading_failed() {
     _archive_loading_failed = true;
@@ -171,25 +111,26 @@
 
   // Return true if given address is in the shared region corresponding to the idx
   static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
-
   static bool is_string_region(int idx) {
       CDS_ONLY(return (idx >= first_string && idx < first_string + max_strings));
       NOT_CDS(return false);
   }
+  static bool is_in_trampoline_frame(address addr) NOT_CDS_RETURN_(false);
 
-  static intptr_t* allocate_cpp_vtable_clones(intptr_t* top, intptr_t* end);
+  static void allocate_cpp_vtable_clones();
   static intptr_t* clone_cpp_vtables(intptr_t* p);
   static void zero_cpp_vtable_clones_for_writing();
   static void patch_cpp_vtable_pointers();
   static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
+  static void serialize(SerializeClosure* sc);
 
-  static void serialize(SerializeClosure* sc, GrowableArray<MemRegion> *string_space,
-                        size_t* space_size);
 
   static MetaspaceSharedStats* stats() {
     return &_stats;
   }
 
+  static void report_out_of_space(const char* name, size_t needed_bytes);
+
   // JVM/TI RedefineClasses() support:
   // Remap the shared readonly space to shared readwrite, private if
   // sharing is enabled. Simply returns true if sharing is not enabled
@@ -206,13 +147,21 @@
   static void link_and_cleanup_shared_classes(TRAPS);
   static void check_shared_class_loader_type(Klass* obj);
 
-  static int count_class(const char* classlist_file);
-  static void estimate_regions_size() NOT_CDS_RETURN;
+  // Allocate a block of memory from the "mc", "ro", or "rw" regions.
+  static char* misc_code_space_alloc(size_t num_bytes);
+  static char* read_only_space_alloc(size_t num_bytes);
 
-  // Allocate a block of memory from the "mc", "md", or "od" regions.
-  static char* misc_code_space_alloc(size_t num_bytes) {  return _mc.alloc(num_bytes); }
-  static char* misc_data_space_alloc(size_t num_bytes) {  return _md.alloc(num_bytes); }
-  static char* optional_data_space_alloc(size_t num_bytes) { return _od.alloc(num_bytes); }
+  template <typename T>
+  static Array<T>* new_ro_array(int length) {
+#if INCLUDE_CDS
+    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+    Array<T>* array = (Array<T>*)read_only_space_alloc(byte_size);
+    array->initialize(length);
+    return array;
+#else
+    return NULL;
+#endif
+  }
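
A hypothetical dump-time call site, purely for illustration:

    // Allocate a read-only Array<u4> of 10 elements directly in the ro region
    // (valid only while dumping, i.e., under -Xshare:dump):
    Array<u4>* offsets = MetaspaceShared::new_ro_array<u4>(10);
    for (int i = 0; i < offsets->length(); i++) {
      offsets->at_put(i, 0);  // bounds-checked element store
    }
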
 
   static address cds_i2i_entry_code_buffers(size_t total_size);
 
@@ -222,18 +171,6 @@
   static size_t cds_i2i_entry_code_buffers_size() {
     return _cds_i2i_entry_code_buffers_size;
   }
-
-  static SharedMiscRegion* misc_code_region() {
-    assert(DumpSharedSpaces, "used during dumping only");
-    return &_mc;
-  }
-  static SharedMiscRegion* misc_data_region() {
-    assert(DumpSharedSpaces, "used during dumping only");
-    return &_md;
-  }
-  static SharedMiscRegion* optional_data_region() {
-    assert(DumpSharedSpaces, "used during dumping only");
-    return &_od;
-  }
+  static void relocate_klass_ptr(oop o);
 };
 #endif // SHARE_VM_MEMORY_METASPACESHARED_HPP
--- a/hotspot/src/share/vm/memory/universe.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -44,6 +44,7 @@
 #include "logging/logStream.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -223,6 +224,37 @@
   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 }
 
+void LatestMethodCache::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_klass);
+}
+
+void Universe::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_boolArrayKlassObj);
+  it->push(&_byteArrayKlassObj);
+  it->push(&_charArrayKlassObj);
+  it->push(&_intArrayKlassObj);
+  it->push(&_shortArrayKlassObj);
+  it->push(&_longArrayKlassObj);
+  it->push(&_singleArrayKlassObj);
+  it->push(&_doubleArrayKlassObj);
+  for (int i = 0; i < T_VOID+1; i++) {
+    it->push(&_typeArrayKlassObjs[i]);
+  }
+  it->push(&_objectArrayKlassObj);
+
+  it->push(&_the_empty_int_array);
+  it->push(&_the_empty_short_array);
+  it->push(&_the_empty_klass_array);
+  it->push(&_the_empty_method_array);
+  it->push(&_the_array_interfaces_array);
+
+  _finalizer_register_cache->metaspace_pointers_do(it);
+  _loader_addClass_cache->metaspace_pointers_do(it);
+  _pd_implies_cache->metaspace_pointers_do(it);
+  _throw_illegal_access_error_cache->metaspace_pointers_do(it);
+  _do_stack_walk_cache->metaspace_pointers_do(it);
+}
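
Note that push() receives the address of each root (e.g. a Klass**), not its value; this
is what allows a relocating closure such as ArchiveCompactor's RefRelocator to patch the
static roots in place. Schematically:

    //   Klass** root = &_boolArrayKlassObj;  // as pushed above
    //   *root = new_location;                // the root now points into the archive
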
+
 // Serialize metadata in and out of CDS archive, not oops.
 void Universe::serialize(SerializeClosure* f, bool do_all) {
 
--- a/hotspot/src/share/vm/memory/universe.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/universe.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -67,6 +67,7 @@
   void serialize(SerializeClosure* f) {
     f->do_ptr((void**)&_klass);
   }
+  void metaspace_pointers_do(MetaspaceClosure* it);
 };
 
 
@@ -102,6 +103,7 @@
   friend class VMStructs;
   friend class VM_PopulateDumpSharedSpace;
   friend class Metaspace;
+  friend class MetaspaceShared;
 
   friend jint  universe_init();
   friend void  universe2_init();
@@ -474,6 +476,7 @@
   // Apply "f" to all klasses for basic types (classes not present in
   // SystemDictionary).
   static void basic_type_classes_do(void f(Klass*));
+  static void metaspace_pointers_do(MetaspaceClosure* it);
 
   // Debugging
   enum VERIFY_FLAGS {
--- a/hotspot/src/share/vm/memory/virtualspace.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/memory/virtualspace.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,6 +63,7 @@
   // Accessors
   char*  base()            const { return _base;      }
   size_t size()            const { return _size;      }
+  char*  end()             const { return _base + _size; }
   size_t alignment()       const { return _alignment; }
   bool   special()         const { return _special;   }
   bool   executable()      const { return _executable;   }
@@ -85,6 +86,9 @@
   static size_t page_align_size_down(size_t size);
   static size_t allocation_align_size_up(size_t size);
   static size_t allocation_align_size_down(size_t size);
+  bool contains(const void* p) const {
+    return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
+  }
 };
 
 ReservedSpace
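
The new end() and contains() accessors turn the common [base, base + size) range checks into one-liners. A usage sketch (the reservation size and pointer arithmetic are illustrative):

    ReservedSpace rs(1024 * 1024);           // reserve 1 MB
    char* p = rs.base() + 100;
    assert(rs.contains(p), "inside [base, end)");
    assert(!rs.contains(rs.end()), "end() is exclusive");
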
--- a/hotspot/src/share/vm/oops/annotations.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/annotations.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "logging/log.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/annotations.hpp"
 #include "oops/instanceKlass.hpp"
@@ -33,7 +35,7 @@
 
 // Allocate annotations in metadata area
 Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
+  return new (loader_data, size(), MetaspaceObj::AnnotationsType, THREAD) Annotations();
 }
 
 // helper
@@ -74,6 +76,13 @@
   }
 }
 
+void Annotations::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(Annotations): %p", this);
+  it->push(&_class_annotations);
+  it->push(&_fields_annotations);
+  it->push(&_class_type_annotations);
+  it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != NULL
+}
 
 void Annotations::print_value_on(outputStream* st) const {
   st->print("Anotations(" INTPTR_FORMAT ")", p2i(this));
--- a/hotspot/src/share/vm/oops/annotations.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/annotations.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -43,6 +43,8 @@
 // a type_annotation instance.
 
 class Annotations: public MetaspaceObj {
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to Annotations::metaspace_pointers_do().
 
   // Annotations for this class, or null if none.
   AnnotationArray*             _class_annotations;
@@ -63,6 +65,10 @@
 
   // Sizing (in words)
   static int size()    { return sizeof(Annotations) / wordSize; }
+
+  // Annotations should be stored in the read-only region of CDS archive.
+  static bool is_read_only_by_default() { return true; }
+
 #if INCLUDE_SERVICES
   void collect_statistics(KlassSizeStats *sz) const;
 #endif
@@ -87,6 +93,9 @@
   static typeArrayOop make_java_array(AnnotationArray* annotations, TRAPS);
 
   bool is_klass() const { return false; }
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  MetaspaceObj::Type type() const { return AnnotationsType; }
+
  private:
   static julong count_bytes(Array<AnnotationArray*>* p);
  public:
--- a/hotspot/src/share/vm/oops/array.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/array.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -36,6 +36,7 @@
 template <typename T>
 class Array: public MetaspaceObj {
   friend class MetadataFactory;
+  friend class MetaspaceShared;
   friend class VMStructs;
   friend class JVMCIVMStructs;
   friend class MethodHandleCompiler;           // special case
@@ -53,13 +54,16 @@
   Array(const Array<T>&);
   void operator=(const Array<T>&);
 
-  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
+  void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw() {
     size_t word_size = Array::size(length);
-    return (void*) Metaspace::allocate(loader_data, word_size, read_only,
+    return (void*) Metaspace::allocate(loader_data, word_size,
                                        MetaspaceObj::array_type(sizeof(T)), THREAD);
   }
 
-  static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
+  static size_t byte_sizeof(int length, size_t elm_byte_size) {
+    return sizeof(Array<T>) + MAX2(length - 1, 0) * elm_byte_size;
+  }
+  static size_t byte_sizeof(int length) { return byte_sizeof(length, sizeof(T)); }
 
   // WhiteBox API helper.
   // Can't distinguish between array of length 0 and length 1,
@@ -130,6 +134,9 @@
 
     return (int)words;
   }
+  static int size(int length, int elm_byte_size) {
+    return align_up(byte_sizeof(length, elm_byte_size), BytesPerWord) / BytesPerWord; // FIXME
+  }
 
   int size() {
     return size(_length);
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -29,6 +29,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "jvmtifiles/jvmti.h"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/arrayKlass.hpp"
@@ -173,6 +174,17 @@
   return JVMTI_CLASS_STATUS_ARRAY;
 }
 
+void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+  Klass::metaspace_pointers_do(it);
+
+  if (log_is_enabled(Trace, cds)) {
+    ResourceMark rm;
+    log_trace(cds)("Iter(ArrayKlass): %p (%s)", this, external_name());
+  }
+
+  // need to cast away volatile
+  it->push((Klass**)&_higher_dimension);
+  it->push((Klass**)&_lower_dimension);
+}
+
 void ArrayKlass::remove_unshareable_info() {
   Klass::remove_unshareable_info();
 }
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -36,6 +36,8 @@
 class ArrayKlass: public Klass {
   friend class VMStructs;
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to ArrayKlass::metaspace_pointers_do().
  int      _dimension;         // This is the n'th-dimensional array.
  Klass* volatile _higher_dimension;  // Refers to the (n+1)'th-dimensional array (if present).
  Klass* volatile _lower_dimension;   // Refers to the (n-1)'th-dimensional array (if present).
@@ -102,6 +104,8 @@
   // Sizing
   static int static_size(int header_size);
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+
 #if INCLUDE_SERVICES
   virtual void collect_statistics(KlassSizeStats *sz) const {
     Klass::collect_statistics(sz);
--- a/hotspot/src/share/vm/oops/constMethod.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/constMethod.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constMethod.hpp"
 #include "oops/method.hpp"
@@ -42,7 +43,7 @@
                                    MethodType method_type,
                                    TRAPS) {
   int size = ConstMethod::size(byte_code_size, sizes);
-  return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
+  return new (loader_data, size, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
       byte_code_size, sizes, method_type, size);
 }
 
@@ -402,6 +403,25 @@
   }
 }
 
+void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(ConstMethod): %p", this);
+
+  it->push(&_constants);
+  it->push(&_stackmap_data);
+  if (has_method_annotations()) {
+    it->push(method_annotations_addr());
+  }
+  if (has_parameter_annotations()) {
+    it->push(parameter_annotations_addr());
+  }
+  if (has_type_annotations()) {
+    it->push(type_annotations_addr());
+  }
+  if (has_default_annotations()) {
+    it->push(default_annotations_addr());
+  }
+}
+
 // Printing
 
 void ConstMethod::print_on(outputStream* st) const {
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,6 +198,9 @@
   // multiple threads, so is volatile.
   volatile uint64_t _fingerprint;
 
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to ConstMethod::metaspace_pointers_do().
+
   ConstantPool*     _constants;                  // Constant pool
 
   // Raw stackmap data for the method
@@ -369,6 +372,10 @@
 
   int size() const                    { return _constMethod_size;}
   void set_constMethod_size(int size)     { _constMethod_size = size; }
+
+  // ConstMethods should be stored in the read-only region of CDS archive.
+  static bool is_read_only_by_default() { return true; }
+
 #if INCLUDE_SERVICES
   void collect_statistics(KlassSizeStats *sz) const;
 #endif
@@ -529,6 +536,8 @@
   bool is_klass() const { return false; }
   DEBUG_ONLY(bool on_stack() { return false; })
 
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  MetaspaceObj::Type type() const { return ConstMethodType; }
 private:
   // Since the size of the compressed line number table is unknown, the
   // offsets of the other variable sized sections are computed backwards
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -32,6 +32,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constantPool.hpp"
@@ -48,9 +49,9 @@
 #include "utilities/copy.hpp"
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
-  Array<u1>* tags = MetadataFactory::new_writeable_array<u1>(loader_data, length, 0, CHECK_NULL);
+  Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
   int size = ConstantPool::size(length);
-  return new (loader_data, size, true, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
+  return new (loader_data, size, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }
 
 #ifdef ASSERT
@@ -108,6 +109,26 @@
   unreference_symbols();
 }
 
+void ConstantPool::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(ConstantPool): %p", this);
+
+  it->push(&_tags, MetaspaceClosure::_writable);
+  it->push(&_cache);
+  it->push(&_pool_holder);
+  it->push(&_operands);
+  it->push(&_resolved_klasses, MetaspaceClosure::_writable);
+
+  for (int i = 0; i < length(); i++) {
+    // The only MSO's embedded in the CP entries are Symbols:
+    //   JVM_CONSTANT_String (normal and pseudo)
+    //   JVM_CONSTANT_Utf8
+    constantTag ctag = tag_at(i);
+    if (ctag.is_string() || ctag.is_utf8()) {
+      it->push(symbol_at_addr(i));
+    }
+  }
+}
+
 objArrayOop ConstantPool::resolved_references() const {
   return (objArrayOop)JNIHandles::resolve(_cache->resolved_references());
 }
@@ -154,7 +175,7 @@
   // UnresolvedKlass entries that are temporarily created during class redefinition.
   assert(num_klasses < CPKlassSlot::_temp_resolved_klass_index, "sanity");
   assert(resolved_klasses() == NULL, "sanity");
-  Array<Klass*>* rk = MetadataFactory::new_writeable_array<Klass*>(loader_data, num_klasses, CHECK);
+  Array<Klass*>* rk = MetadataFactory::new_array<Klass*>(loader_data, num_klasses, CHECK);
   set_resolved_klasses(rk);
 }
 
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -99,6 +99,8 @@
   friend class BytecodeInterpreter;  // Directly extracts a klass in the pool for fast instanceof/checkcast
   friend class Universe;             // For null constructor
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to ConstantPool::metaspace_pointers_do().
   Array<u1>*           _tags;        // the tag array describing the constant pool's contents
   ConstantPoolCache*   _cache;       // the cache holding interpreter runtime information
   InstanceKlass*       _pool_holder; // the corresponding class
@@ -212,6 +214,9 @@
   ConstantPoolCache* cache() const        { return _cache; }
   void set_cache(ConstantPoolCache* cache){ _cache = cache; }
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+  virtual MetaspaceObj::Type type() const { return ConstantPoolType; }
+
   // Create object cache in the constant pool
   void initialize_resolved_references(ClassLoaderData* loader_data,
                                       const intStack& reference_map,
@@ -765,6 +770,9 @@
   void collect_statistics(KlassSizeStats *sz) const;
 #endif
 
+  // ConstantPools should be stored in the read-only region of CDS archive.
+  static bool is_read_only_by_default() { return true; }
+
   friend class ClassFileParser;
   friend class SystemDictionary;
 
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -26,6 +26,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/log.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/cpCache.hpp"
@@ -566,7 +567,7 @@
   const int length = index_map.length() + invokedynamic_index_map.length();
   int size = ConstantPoolCache::size(length);
 
-  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
+  return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD)
     ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
 }
 
@@ -652,6 +653,11 @@
 }
 #endif // INCLUDE_JVMTI
 
+void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(ConstantPoolCache): %p", this);
+  it->push(&_constant_pool);
+  it->push(&_reference_map);
+}
 
 // Printing
 
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -404,6 +404,8 @@
   friend class VMStructs;
   friend class MetadataFactory;
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to ConstantPoolCache::metaspace_pointers_do().
   int             _length;
   ConstantPool*   _constant_pool;          // the corresponding constant pool
 
@@ -443,6 +445,8 @@
   bool is_constantPoolCache() const { return true; }
 
   int length() const                             { return _length; }
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  MetaspaceObj::Type type() const                { return ConstantPoolCacheType; }
 
   jobject resolved_references()           { return _resolved_references; }
   void set_resolved_references(jobject s) { _resolved_references = s; }
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -46,6 +46,7 @@
 #include "memory/heapInspection.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -1998,6 +1999,49 @@
   }
 }
 
+void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+  Klass::metaspace_pointers_do(it);
+
+  if (log_is_enabled(Trace, cds)) {
+    ResourceMark rm;
+    log_trace(cds)("Iter(InstanceKlass): %p (%s)", this, external_name());
+  }
+
+  it->push(&_annotations);
+  it->push((Klass**)&_array_klasses);
+  it->push(&_constants);
+  it->push(&_inner_classes);
+  it->push(&_array_name);
+#if INCLUDE_JVMTI
+  it->push(&_previous_versions);
+#endif
+  it->push(&_methods);
+  it->push(&_default_methods);
+  it->push(&_local_interfaces);
+  it->push(&_transitive_interfaces);
+  it->push(&_method_ordering);
+  it->push(&_default_vtable_indices);
+  it->push(&_fields);
+
+  if (itable_length() > 0) {
+    itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
+    int method_table_offset_in_words = ioe->offset()/wordSize;
+    int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
+                         / itableOffsetEntry::size();
+
+    for (int i = 0; i < nof_interfaces; i++, ioe++) {
+      if (ioe->interface_klass() != NULL) {
+        it->push(ioe->interface_klass_addr());
+        itableMethodEntry* ime = ioe->first_method_entry(this);
+        int n = klassItable::method_count_for_interface(ioe->interface_klass());
+        for (int index = 0; index < n; index++) {
+          it->push(ime[index].method_addr());
+        }
+      }
+    }
+  }
+}
+
 void InstanceKlass::remove_unshareable_info() {
   Klass::remove_unshareable_info();
 
@@ -2018,12 +2062,26 @@
 
   constants()->remove_unshareable_info();
 
-  assert(_dep_context == DependencyContext::EMPTY, "dependency context is not shareable");
-
   for (int i = 0; i < methods()->length(); i++) {
     Method* m = methods()->at(i);
     m->remove_unshareable_info();
   }
+
+  // These are not allocated from metaspace, but they should all be empty
+  // during dump time, so we don't need to worry about them in InstanceKlass::metaspace_pointers_do().
+  guarantee(_source_debug_extension == NULL, "must be");
+  guarantee(_oop_map_cache == NULL, "must be");
+  guarantee(_init_thread == NULL, "must be");
+  guarantee(_jni_ids == NULL, "must be");
+  guarantee(_methods_jmethod_ids == NULL, "must be");
+  guarantee(_dep_context == DependencyContext::EMPTY, "must be");
+  guarantee(_osr_nmethods_head == NULL, "must be");
+
+#if INCLUDE_JVMTI
+  guarantee(_breakpoints == NULL, "must be");
+  guarantee(_previous_versions == NULL, "must be");
+#endif
 }
 
 static void restore_unshareable_in_class(Klass* k, TRAPS) {
@@ -3664,11 +3722,15 @@
 
 #if INCLUDE_CDS
 JvmtiCachedClassFileData* InstanceKlass::get_archived_class_data() {
-  assert(this->is_shared(), "class should be shared");
-  if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
+  if (DumpSharedSpaces) {
     return _cached_class_file;
   } else {
-    return NULL;
+    assert(this->is_shared(), "class should be shared");
+    if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
+      return _cached_class_file;
+    } else {
+      return NULL;
+    }
   }
 }
 #endif
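
The itable walk above recovers the interface count from the layout alone: the offset() stored in the first itableOffsetEntry points at the first method table, so the distance from the itable start to that offset, divided by the entry size, yields the number of offset entries. Schematically (assumed layout, per klassVtable.hpp):

    // start_of_itable()
    //   itableOffsetEntry[0]  { interface_klass, offset }  // offset points
    //   itableOffsetEntry[1]  { interface_klass, offset }  // past the last
    //   ...                                                // offset entry
    //   itableMethodEntry[..] one block per interface
    //
    // nof_interfaces = (first offset in words - itable_offset_in_words())
    //                  / itableOffsetEntry::size()
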
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -142,6 +142,9 @@
   static InstanceKlass* allocate_instance_klass(const ClassFileParser& parser, TRAPS);
 
  protected:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to InstanceKlass::metaspace_pointers_do().
+
   // Annotations for this class
   Annotations*    _annotations;
   // Package this class is defined in
@@ -1341,6 +1344,8 @@
   // JVMTI support
   jint jvmti_class_status() const;
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+
  public:
   // Printing
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/oops/klass.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -31,6 +31,8 @@
 #include "logging/log.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
@@ -162,8 +164,7 @@
 }
 
 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
-  return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             MetaspaceObj::ClassType, THREAD);
+  return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD);
 }
 
 // "Normal" instantiation is preceeded by a MetaspaceObj allocation
@@ -485,6 +486,29 @@
   cl->do_oop(&_java_mirror);
 }
 
+void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
+  if (log_is_enabled(Trace, cds)) {
+    ResourceMark rm;
+    log_trace(cds)("Iter(Klass): %p (%s)", this, external_name());
+  }
+
+  it->push(&_name);
+  it->push(&_secondary_super_cache);
+  it->push(&_secondary_supers);
+  for (int i = 0; i < _primary_super_limit; i++) {
+    it->push(&_primary_supers[i]);
+  }
+  it->push(&_super);
+  it->push(&_subklass);
+  it->push(&_next_sibling);
+  it->push(&_next_link);
+
+  vtableEntry* vt = start_of_vtable();
+  for (int i = 0; i < vtable_length(); i++) {
+    it->push(vt[i].method_addr());
+  }
+}
+
 void Klass::remove_unshareable_info() {
   assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
   TRACE_REMOVE_ID(this);
--- a/hotspot/src/share/vm/oops/klass.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -65,6 +65,9 @@
   friend class VMStructs;
   friend class JVMCIVMStructs;
  protected:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to Klass::metaspace_pointers_do().
+
   // note: put frequently-used fields together at start of klass structure
   // for better cache behavior (may not make much of a difference but sure won't hurt)
   enum { _primary_super_limit = 8 };
@@ -597,6 +600,9 @@
   // garbage collection support
   void oops_do(OopClosure* cl);
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+  virtual MetaspaceObj::Type type() const { return ClassType; }
+
   // Iff the class loader (or mirror for anonymous classes) is alive the
   // Klass is considered alive.
   // The is_alive closure passed in depends on the Garbage Collector used.
--- a/hotspot/src/share/vm/oops/klassVtable.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -196,6 +196,7 @@
 
   static int method_offset_in_bytes() { return offset_of(vtableEntry, _method); }
   Method* method() const    { return _method; }
+  Method** method_addr()    { return &_method; }
 
  private:
   Method* _method;
@@ -236,6 +237,7 @@
   int      _offset;
  public:
   Klass* interface_klass() const { return _interface; }
+  Klass** interface_klass_addr()   { return &_interface; }
   int      offset() const          { return _offset; }
 
   static itableMethodEntry* method_entry(Klass* k, int offset) { return (itableMethodEntry*)(((address)k) + offset); }
@@ -258,6 +260,7 @@
 
  public:
   Method* method() const { return _method; }
+  Method** method_addr() { return &_method; }
 
   void clear()             { _method = NULL; }
 
--- a/hotspot/src/share/vm/oops/metadata.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/metadata.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -48,8 +48,10 @@
   virtual bool is_methodData()         const volatile { return false; }
   virtual bool is_constantPool()       const volatile { return false; }
   virtual bool is_methodCounters()     const volatile { return false; }
-
+  virtual int  size()                  const = 0;
+  virtual MetaspaceObj::Type type()    const = 0;
   virtual const char* internal_name()  const = 0;
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter) {}
 
   void print()       const { print_on(tty); }
   void print_value() const { print_value_on(tty); }
--- a/hotspot/src/share/vm/oops/method.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/method.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -37,6 +37,7 @@
 #include "interpreter/oopMapCache.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -79,7 +80,7 @@
                                           method_type,
                                           CHECK_NULL);
   int size = Method::size(access_flags.is_native());
-  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
+  return new (loader_data, size, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
 }
 
 Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
@@ -305,6 +306,14 @@
 }
 
 
+void Method::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(Method): %p", this);
+
+  it->push(&_constMethod);
+  it->push(&_method_data);
+  it->push(&_method_counters);
+}
+
 // Attempt to return method oop to original state.  Clear any pointers
 // (to objects outside the shared spaces).  We won't be able to predict
 // where they should point in a new JVM.  Further initialize some
--- a/hotspot/src/share/vm/oops/method.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/method.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -66,6 +66,8 @@
  friend class VMStructs;
  friend class JVMCIVMStructs;
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to Method::metaspace_pointers_do().
   ConstMethod*      _constMethod;                // Method read-only data.
   MethodData*       _method_data;
   MethodCounters*   _method_counters;
@@ -471,6 +473,9 @@
   // clear entry points. Used by sharing code during dump time
   void unlink_method() NOT_CDS_RETURN;
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+  virtual MetaspaceObj::Type type() const { return MethodType; }
+
   // vtable index
   enum VtableIndexFlag {
     // Valid vtable indexes are non-negative (>= 0).
--- a/hotspot/src/share/vm/oops/methodCounters.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/methodCounters.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -22,12 +22,13 @@
  *
  */
 #include "precompiled.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "oops/methodCounters.hpp"
 #include "runtime/handles.inline.hpp"
 
 MethodCounters* MethodCounters::allocate(const methodHandle& mh, TRAPS) {
   ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
-  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
+  return new(loader_data, method_counters_size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
 }
 
 void MethodCounters::clear_counters() {
@@ -73,6 +74,12 @@
 #endif
 }
 
+void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(MethodCounters): %p", this);
+#if INCLUDE_AOT
+  it->push(&_method);
+#endif
+}
 
 void MethodCounters::print_value_on(outputStream* st) const {
   assert(is_methodCounters(), "must be methodCounters");
--- a/hotspot/src/share/vm/oops/methodCounters.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -35,6 +35,8 @@
  friend class VMStructs;
  friend class JVMCIVMStructs;
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to MethodCounters::metaspace_pointers_do().
 #if INCLUDE_AOT
   Method*           _method;                     // Back link to Method
 #endif
@@ -118,10 +120,14 @@
 
   AOT_ONLY(Method* method() const { return _method; })
 
-  static int size() {
+  static int method_counters_size() {
     return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
   }
-
+  virtual int size() const {
+    return method_counters_size();
+  }
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  MetaspaceObj::Type type() const { return MethodCountersType; }
   void clear_counters();
 
 #if defined(COMPILER2) || INCLUDE_JVMCI
--- a/hotspot/src/share/vm/oops/methodData.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -29,6 +29,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/heapInspection.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
@@ -715,7 +716,7 @@
 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
   int size = MethodData::compute_allocation_size_in_words(method);
 
-  return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
+  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
     MethodData(method(), size, THREAD);
 }
 
@@ -1634,6 +1635,11 @@
   return m->is_compiled_lambda_form();
 }
 
+void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
+  log_trace(cds)("Iter(MethodData): %p", this);
+  it->push(&_method);
+}
+
 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
   if (shift == 0) {
     return;
--- a/hotspot/src/share/vm/oops/methodData.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -2154,6 +2154,9 @@
   friend class ProfileData;
   friend class TypeEntriesAtCall;
 
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to MethodData::metaspace_pointers_do().
+
   // Back pointer to the Method*
   Method* _method;
 
@@ -2591,6 +2594,9 @@
     return byte_offset_of(MethodData, _parameters_type_data_di);
   }
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+  virtual MetaspaceObj::Type type() const { return MethodDataType; }
+
   // Deallocation support - no pointer fields to deallocate
   void deallocate_contents(ClassLoaderData* loader_data) {}
 
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -32,6 +32,7 @@
 #include "gc/shared/specialized_oop_closures.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/arrayKlass.inline.hpp"
@@ -398,6 +399,12 @@
   bottom_klass()->initialize(THREAD);  // dispatches to either InstanceKlass or TypeArrayKlass
 }
 
+void ObjArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+  ArrayKlass::metaspace_pointers_do(it);
+  it->push(&_element_klass);
+  it->push(&_bottom_klass);
+}
+
 // JVM support
 
 jint ObjArrayKlass::compute_modifier_flags(TRAPS) const {
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -35,6 +35,8 @@
   friend class VMStructs;
   friend class JVMCIVMStructs;
  private:
+  // If you add a new field that points to any metaspace object, you
+  // must add this field to ObjArrayKlass::metaspace_pointers_do().
   Klass* _element_klass;            // The klass of the elements of this array type
   Klass* _bottom_klass;             // The one-dimensional type (InstanceKlass or TypeArrayKlass)
 
@@ -80,6 +82,8 @@
   // Compute protection domain
   oop protection_domain() const { return bottom_klass()->protection_domain(); }
 
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+
  private:
   // Either oop or narrowOop depending on UseCompressedOops.
   // must be called from within ObjArrayKlass.cpp
--- a/hotspot/src/share/vm/oops/symbol.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/symbol.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/symbol.hpp"
@@ -53,13 +55,6 @@
   return res;
 }
 
-void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
-  address res;
-  res = (address) Metaspace::allocate(loader_data, size(len), true,
-                                      MetaspaceObj::SymbolType, CHECK_NULL);
-  return res;
-}
-
 void Symbol::operator delete(void *p) {
   assert(((Symbol*)p)->refcount() == 0, "should not call this");
   FreeHeap(p);
@@ -235,6 +230,15 @@
   }
 }
 
+void Symbol::metaspace_pointers_do(MetaspaceClosure* it) {
+  if (log_is_enabled(Trace, cds)) {
+    LogStream trace_stream(Log(cds)::trace());
+    trace_stream.print("Iter(Symbol): %p ", this);
+    print_value_on(&trace_stream);
+    trace_stream.cr();
+  }
+}
+
 void Symbol::print_on(outputStream* st) const {
   if (this == NULL) {
     st->print_cr("NULL");
--- a/hotspot/src/share/vm/oops/symbol.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/oops/symbol.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -119,9 +119,13 @@
     max_symbol_length = (1 << 16) -1
   };
 
+  static int byte_size(int length) {
+    // minimum number of bytes needed to hold these bits (no non-heap version)
+    return (int)(sizeof(Symbol) + (length > 2 ? length - 2 : 0));
+  }
   static int size(int length) {
     // minimum number of natural words needed to hold these bits (no non-heap version)
-    return (int)heap_word_size(sizeof(Symbol) + (length > 2 ? length - 2 : 0));
+    return (int)heap_word_size(byte_size(length));
   }
 
   void byte_at_put(int index, int value) {
@@ -141,6 +145,10 @@
   const jbyte* base() const { return &_body[0]; }
 
   int size()                { return size(utf8_length()); }
+  int byte_size()           { return byte_size(utf8_length()); }
+
+  // Symbols should be stored in the read-only region of CDS archive.
+  static bool is_read_only_by_default() { return true; }
 
   // Returns the largest size symbol we can safely hold.
   static int max_length() { return max_symbol_length; }
@@ -164,6 +172,9 @@
       _refcount = PERM_REFCOUNT;
     }
   }
+  bool is_permanent() {
+    return (_refcount == PERM_REFCOUNT);
+  }
 
   int byte_at(int index) const {
     assert(index >=0 && index < _length, "symbol index overflow");
@@ -227,6 +238,9 @@
   const char* as_klass_external_name() const;
   const char* as_klass_external_name(char* buf, int size) const;
 
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  MetaspaceObj::Type type() const { return SymbolType; }
+
   // Printing
   void print_symbol_on(outputStream* st = NULL) const;
   void print_utf8_on(outputStream* st) const;
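
byte_size() reports the unaligned footprint that size() then rounds up to whole words; the archive sizing code presumably wants the byte figure so symbols can be accounted for exactly. A quick check of the arithmetic (the length is illustrative; the "- 2" comes from the two body bytes embedded in sizeof(Symbol)):

    int n = 10;                    // utf8 length, illustrative
    int b = Symbol::byte_size(n);  // sizeof(Symbol) + 8 bytes
    int w = Symbol::size(n);       // heap_word_size(b) words
    assert(w * wordSize >= b, "size() rounds byte_size() up to whole words");
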
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -1454,7 +1454,7 @@
       ? java_lang_ClassLoader::loader_data(class_loader_oop)
       : ClassLoaderData::the_null_class_loader_data();
 
-  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
+  void* metadata = MetadataFactory::new_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
 
   return (jlong)(uintptr_t)metadata;
 WB_END
@@ -1553,6 +1553,9 @@
   return MetaspaceGC::should_concurrent_collect();
 WB_END
 
+WB_ENTRY(jlong, WB_MetaspaceReserveAlignment(JNIEnv* env, jobject wb))
+  return (jlong)Metaspace::reserve_alignment();
+WB_END
 
 WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
   Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
@@ -1937,6 +1940,7 @@
   {CC"incMetaspaceCapacityUntilGC", CC"(J)J",         (void*)&WB_IncMetaspaceCapacityUntilGC },
   {CC"metaspaceCapacityUntilGC", CC"()J",             (void*)&WB_MetaspaceCapacityUntilGC },
   {CC"metaspaceShouldConcurrentCollect", CC"()Z",     (void*)&WB_MetaspaceShouldConcurrentCollect },
+  {CC"metaspaceReserveAlignment", CC"()J",            (void*)&WB_MetaspaceReserveAlignment },
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod0",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -130,36 +130,3 @@
     return Flag::SUCCESS;
   }
 }
-
-static inline Flag::Error sharedConstraintFunc(const char *name, size_t value, size_t taken, bool verbose) {
-  size_t available = (MAX_SHARED_DELTA-(taken+SHARED_PAGE));
-  if (value > available) {
-    CommandLineError::print(verbose,
-                            "%s (" SIZE_FORMAT ") must be "
-                            "smaller than or equal to (" SIZE_FORMAT ")\n",
-                            name, value, available);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose) {
-  size_t taken = (SharedReadOnlySize+SharedMiscDataSize+SharedMiscCodeSize);
-  return sharedConstraintFunc("SharedReadWriteSize", value, taken, verbose);
-}
-
-Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose) {
-  size_t taken = (SharedReadWriteSize+SharedMiscDataSize+SharedMiscCodeSize);
-  return sharedConstraintFunc("SharedReadOnlySize", value, taken, verbose);
-}
-
-Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose) {
-  size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscCodeSize);
-  return sharedConstraintFunc("SharedMiscDataSize", value, taken, verbose);
-}
-
-Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose) {
-  size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscDataSize);
-  return sharedConstraintFunc("SharedMiscCodeSize", value, taken, verbose);
-}
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -45,9 +45,4 @@
 
 Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
 
-Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose);
-Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose);
-
 #endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */
--- a/hotspot/src/share/vm/runtime/globals.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -3900,25 +3900,17 @@
           "If PrintSharedArchiveAndExit is true, also print the shared "    \
           "dictionary")                                                     \
                                                                             \
-  product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE,      \
-          "Size of read-write space for metadata (in bytes)")               \
-          range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE)     \
-          constraint(SharedReadWriteSizeConstraintFunc,AfterErgo)           \
-                                                                            \
-  product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE,        \
-          "Size of read-only space for metadata (in bytes)")                \
-          range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE)       \
-          constraint(SharedReadOnlySizeConstraintFunc,AfterErgo)            \
-                                                                            \
-  product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE,        \
-          "Size of the shared miscellaneous data area (in bytes)")          \
-          range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE)       \
-          constraint(SharedMiscDataSizeConstraintFunc,AfterErgo)            \
-                                                                            \
-  product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE,        \
-          "Size of the shared miscellaneous code area (in bytes)")          \
-          range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE)       \
-          constraint(SharedMiscCodeSizeConstraintFunc,AfterErgo)            \
+  product(size_t, SharedReadWriteSize, 0,                                   \
+          "Deprecated")                                                     \
+                                                                            \
+  product(size_t, SharedReadOnlySize, 0,                                    \
+          "Deprecated")                                                     \
+                                                                            \
+  product(size_t, SharedMiscDataSize,  0,                                   \
+          "Deprecated")                                                     \
+                                                                            \
+  product(size_t, SharedMiscCodeSize,  0,                                   \
+          "Deprecated")                                                     \
                                                                             \
   product(size_t, SharedBaseAddress, LP64_ONLY(32*G)                        \
           NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)),                           \
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -3111,8 +3111,8 @@
 
 void CDSAdapterHandlerEntry::init() {
   assert(DumpSharedSpaces, "used during dump time only");
-  _c2i_entry_trampoline = (address)MetaspaceShared::misc_data_space_alloc(SharedRuntime::trampoline_size());
-  _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_data_space_alloc(sizeof(AdapterHandlerEntry*));
+  _c2i_entry_trampoline = (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
+  _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*));
 };
 
 #endif // INCLUDE_CDS
--- a/hotspot/src/share/vm/utilities/debug.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -275,42 +275,6 @@
 #endif // !PRODUCT
 }
 
-void report_out_of_shared_space(SharedSpaceType shared_space) {
-  if (shared_space == SharedOptional) {
-    // The estimated shared_optional_space size is large enough
-    // for all class bytes.  It should not run out of space.
-    ShouldNotReachHere();
-  }
-
-  static const char* name[] = {
-    "shared read only space",
-    "shared read write space",
-    "shared miscellaneous data space",
-    "shared miscellaneous code space"
-  };
-  static const char* flag[] = {
-    "SharedReadOnlySize",
-    "SharedReadWriteSize",
-    "SharedMiscDataSize",
-    "SharedMiscCodeSize"
-  };
-
-   warning("\nThe %s is not large enough\n"
-           "to preload requested classes. Use -XX:%s=<size>\n"
-           "to increase the initial size of %s.\n",
-           name[shared_space], flag[shared_space], name[shared_space]);
-   exit(2);
-}
-
-
-void report_insufficient_metaspace(size_t required_size) {
-  warning("\nThe MaxMetaspaceSize of " SIZE_FORMAT " bytes is not large enough.\n"
-          "Either don't specify the -XX:MaxMetaspaceSize=<size>\n"
-          "or increase the size to at least " SIZE_FORMAT ".\n",
-          MaxMetaspaceSize, required_size);
-  exit(2);
-}
-
 void report_java_out_of_memory(const char* message) {
   static jint out_of_memory_reported = 0;
 
--- a/hotspot/src/share/vm/utilities/debug.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/utilities/debug.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -184,19 +184,6 @@
   typedef char PASTE_TOKENS(STATIC_ASSERT_DUMMY_TYPE_, __LINE__)[ \
     STATIC_ASSERT_FAILURE< (Cond) >::value ]
 
-// out of shared space reporting
-enum SharedSpaceType {
-  SharedReadOnly,
-  SharedReadWrite,
-  SharedMiscData,
-  SharedMiscCode,
-  SharedOptional
-};
-
-void report_out_of_shared_space(SharedSpaceType space_type);
-
-void report_insufficient_metaspace(size_t required_size);
-
 // out of memory reporting
 void report_java_out_of_memory(const char* message);
 
--- a/hotspot/src/share/vm/utilities/hashtable.cpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp	Wed Aug 02 18:06:38 2017 -0700
@@ -198,30 +198,39 @@
   }
   Atomic::add(-context->_num_removed, &_number_of_entries);
 }
-
-// Copy the table to the shared space.
-
-template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {
+template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
+  size_t bytes = 0;
+  bytes += sizeof(intptr_t); // len
 
-  // Dump the hash table entries.
+  for (int i = 0; i < _table_size; ++i) {
+    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
+         *p != NULL;
+         p = (*p)->next_addr()) {
+      bytes += entry_size();
+    }
+  }
 
-  intptr_t *plen = (intptr_t*)(*top);
-  *top += sizeof(*plen);
+  return bytes;
+}
+
+// Dump the hash table entries (into CDS archive)
+template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
+  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
+  intptr_t *plen = (intptr_t*)(top);
+  top += sizeof(*plen);
 
   int i;
   for (i = 0; i < _table_size; ++i) {
     for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
-                              *p != NULL;
-                               p = (*p)->next_addr()) {
-      if (*top + entry_size() > end) {
-        report_out_of_shared_space(SharedMiscData);
-      }
-      *p = (BasicHashtableEntry<F>*)memcpy(*top, (void*)*p, entry_size());
-      *top += entry_size();
+         *p != NULL;
+         p = (*p)->next_addr()) {
+      *p = (BasicHashtableEntry<F>*)memcpy(top, (void*)*p, entry_size());
+      top += entry_size();
     }
   }
-  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);
-
+  *plen = (char*)(top) - (char*)plen - sizeof(*plen);
+  assert(top == end, "count_bytes_for_table is wrong");
   // Set the shared bit.
 
   for (i = 0; i < _table_size; ++i) {
@@ -272,7 +281,7 @@
   for (int i = 0; i < this->table_size(); ++i) {
     int count = 0;
     for (HashtableEntry<T, F>* e = this->bucket(i);
-       e != NULL; e = e->next()) {
+         e != NULL; e = e->next()) {
       count++;
       literal_bytes += literal_size(e->literal());
     }
@@ -305,19 +314,29 @@
 
 // Dump the hash table buckets.
 
-template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
-  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
-  *(intptr_t*)(*top) = len;
-  *top += sizeof(intptr_t);
+template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
+  size_t bytes = 0;
+  bytes += sizeof(intptr_t); // len
+  bytes += sizeof(intptr_t); // _number_of_entries
+  bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets
+
+  return bytes;
+}
 
-  *(intptr_t*)(*top) = _number_of_entries;
-  *top += sizeof(intptr_t);
+// Dump the buckets (into CDS archive)
+template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
+  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
+  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
+  *(intptr_t*)(top) = len;
+  top += sizeof(intptr_t);
 
-  if (*top + len > end) {
-    report_out_of_shared_space(SharedMiscData);
-  }
-  _buckets = (HashtableBucket<F>*)memcpy(*top, (void*)_buckets, len);
-  *top += len;
+  *(intptr_t*)(top) = _number_of_entries;
+  top += sizeof(intptr_t);
+
+  _buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
+  top += len;
+
+  assert(top == end, "count_bytes_for_buckets is wrong");
 }
 
 #ifndef PRODUCT
@@ -397,6 +416,7 @@
 template class HashtableEntry<Symbol*, mtSymbol>;
 template class HashtableEntry<Symbol*, mtClass>;
 template class HashtableEntry<oop, mtSymbol>;
+template class HashtableBucket<mtClass>;
 template class BasicHashtableEntry<mtSymbol>;
 template class BasicHashtableEntry<mtCode>;
 template class BasicHashtable<mtClass>;
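
Where the old copy_table()/copy_buckets() advanced a char** cursor and bailed out via report_out_of_shared_space() on overflow, the new protocol is two-pass: count_bytes_for_*() measures first, then copy_*() fills an exactly-sized [top, end) window and asserts it lands precisely on end. A caller-side sketch (alloc_in_archive() is a hypothetical placeholder for the CDS region allocator):

    size_t bucket_bytes = table->count_bytes_for_buckets();  // pass 1: measure
    size_t entry_bytes  = table->count_bytes_for_table();
    char* p = alloc_in_archive(bucket_bytes + entry_bytes);  // hypothetical
    table->copy_buckets(p, p + bucket_bytes);                // pass 2: copy
    table->copy_table(p + bucket_bytes, p + bucket_bytes + entry_bytes);
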
--- a/hotspot/src/share/vm/utilities/hashtable.hpp	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp	Wed Aug 02 18:06:38 2017 -0700
@@ -148,8 +148,10 @@
                  HashtableBucket<F>* buckets, int number_of_entries);
 
   // Sharing support.
-  void copy_buckets(char** top, char* end);
-  void copy_table(char** top, char* end);
+  size_t count_bytes_for_buckets();
+  size_t count_bytes_for_table();
+  void copy_buckets(char* top, char* end);
+  void copy_table(char* top, char* end);
 
   // Bucket handling
   int hash_to_index(unsigned int full_hash) const {
--- a/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Wed Aug 02 18:06:38 2017 -0700
@@ -77,12 +77,6 @@
 
         allOptionsAsMap = JVMOptionsUtils.getOptionsWithRangeAsMap(origin -> (!(origin.contains("develop") || origin.contains("notproduct"))));
 
-        /* Shared flags can cause JVM to exit with error code 2 */
-        setAllowedExitCodes("SharedReadWriteSize", 2);
-        setAllowedExitCodes("SharedReadOnlySize", 2);
-        setAllowedExitCodes("SharedMiscDataSize", 2);
-        setAllowedExitCodes("SharedMiscCodeSize", 2);
-
         /*
          * Remove CICompilerCount from testing because currently it can hang system
          */
--- a/hotspot/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Wed Aug 02 18:06:38 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,10 +232,6 @@
             case "NewSizeThreadIncrease":
                 option.addPrepend("-XX:+UseSerialGC");
                 break;
-            case "SharedReadWriteSize":
-            case "SharedReadOnlySize":
-            case "SharedMiscDataSize":
-            case "SharedMiscCodeSize":
             case "SharedBaseAddress":
             case "SharedSymbolTableBucketSize":
                 option.addPrepend("-XX:+UnlockDiagnosticVMOptions");
--- a/hotspot/test/runtime/SharedArchiveFile/LargeSharedSpace.java	Sat Jul 22 15:54:27 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test LargeSharedSpace
- * @bug 8168790 8169870
- * @summary Test CDS dumping using specific space size without crashing.
- * The space size used in the test might not be suitable on windows.
- * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
- * @requires (os.family != "windows")
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @run main LargeSharedSpace
- */
-
-import jdk.test.lib.cds.CDSTestUtils;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.Platform;
-
-public class LargeSharedSpace {
-    public static void main(String[] args) throws Exception {
-       OutputAnalyzer output;
-
-       // Test case 1: -XX:SharedMiscCodeSize=1066924031
-       //
-       // The archive should be dumped successfully. It might fail to reserve memory
-       // for shared space under low memory condition. The dumping process should not crash.
-       output = CDSTestUtils.createArchive("-XX:SharedMiscCodeSize=1066924031",
-                                           "-XX:+UnlockDiagnosticVMOptions");
-       try {
-           output.shouldContain("Loading classes to share");
-       } catch (RuntimeException e1) {
-           output.shouldContain("Unable to allocate memory for shared space");
-       }
-
-       // Test case 2: -XX:SharedMiscCodeSize=1600386047
-       //
-       // On 64-bit platform, compressed class pointer is used. When the combined
-       // shared space size and the compressed space size is larger than the 4G
-       // compressed klass limit (0x100000000), error is reported.
-       //
-       // The dumping process should not crash.
-       if (Platform.is64bit()) {
-           CDSTestUtils.createArchive(
-               "-XX:+UseCompressedClassPointers", "-XX:CompressedClassSpaceSize=3G",
-               "-XX:SharedMiscCodeSize=1600386047")
-               .shouldContain("larger than compressed klass limit");
-        }
-
-        // Test case 3: -XX:SharedMiscCodeSize=1600386047
-        //
-        // On 32-bit platform, compressed class pointer is not used. It may fail
-        // to reserve memory under low memory condition.
-        //
-        // The dumping process should not crash.
-        if (Platform.is32bit()) {
-            output = CDSTestUtils.createArchive("-XX:SharedMiscCodeSize=1600386047");
-            try {
-                output.shouldContain("Loading classes to share");
-            } catch (RuntimeException e3) {
-                output.shouldContain("Unable to allocate memory for shared space");
-            }
-        }
-    }
-}
--- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Sat Jul 22 15:54:27 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/* @test LimitSharedSizes
- * @summary Test handling of limits on shared space size
- * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
- * @library /test/lib /runtime/CommandLine/OptionsValidation/common
- * @modules java.base/jdk.internal.misc
- *          java.management
- *          jdk.attach/sun.tools.attach
- * @run main LimitSharedSizes
- */
-
-import jdk.test.lib.cds.CDSTestUtils;
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.Platform;
-import optionsvalidation.JVMOptionsUtils;
-
-public class LimitSharedSizes {
-    static enum Result {
-        OUT_OF_RANGE,
-        TOO_SMALL,
-        VALID,
-        VALID_ARCHIVE
-    }
-
-    static enum Region {
-        RO, RW, MD, MC
-    }
-
-    private static final boolean fitsRange(String name, String value) throws RuntimeException {
-        boolean fits = true;
-        try {
-            fits = JVMOptionsUtils.fitsRange(name, value);
-        } catch (Exception e) {
-            throw new RuntimeException(e.getMessage());
-        }
-        return fits;
-    }
-
-    private static class SharedSizeTestData {
-        public String optionName;
-        public String optionValue;
-        public Result optionResult;
-
-        public SharedSizeTestData(Region region, String value) {
-            optionName = "-XX:"+getName(region);
-            optionValue = value;
-            if (fitsRange(getName(region), value) == false) {
-                optionResult = Result.OUT_OF_RANGE;
-            } else {
-                optionResult = Result.TOO_SMALL;
-            }
-        }
-
-        public SharedSizeTestData(Region region, String value, Result result) {
-            optionName = "-XX:"+getName(region);
-            optionValue = value;
-            optionResult = result;
-        }
-
-        private String getName(Region region) {
-            String name;
-            switch (region) {
-                case RO:
-                    name = "SharedReadOnlySize";
-                    break;
-                case RW:
-                    name = "SharedReadWriteSize";
-                    break;
-                case MD:
-                    name = "SharedMiscDataSize";
-                    break;
-                case MC:
-                    name = "SharedMiscCodeSize";
-                    break;
-                default:
-                    name = "Unknown";
-                    break;
-            }
-            return name;
-        }
-
-        public Result getResult() {
-            return optionResult;
-        }
-    }
-
-    private static final SharedSizeTestData[] testTable = {
-        // A region size that is too small should not cause a VM crash.
-        // It should result in an error message either like the following #1:
-        // The shared miscellaneous code space is not large enough
-        // to preload requested classes. Use -XX:SharedMiscCodeSize=
-        // to increase the initial size of shared miscellaneous code space.
-        // or #2:
-        // The shared miscellaneous code space is outside the allowed range
-        new SharedSizeTestData(Region.RO, "4M"),
-        new SharedSizeTestData(Region.RW, "4M"),
-        new SharedSizeTestData(Region.MD, "50k"),
-        new SharedSizeTestData(Region.MC, "20k"),
-
-        // these values are larger than default ones, and should
-        // be acceptable and not cause failure
-        new SharedSizeTestData(Region.RO, "20M", Result.VALID),
-        new SharedSizeTestData(Region.RW, "20M", Result.VALID),
-        new SharedSizeTestData(Region.MD, "20M", Result.VALID),
-        new SharedSizeTestData(Region.MC, "20M", Result.VALID),
-
-        // test with sizes which just meet the minimum required sizes
-        // the following tests also attempt to use the shared archive
-        new SharedSizeTestData(Region.RO, Platform.is64bit() ? "14M":"9M", Result.VALID_ARCHIVE),
-        new SharedSizeTestData(Region.RW, Platform.is64bit() ? "12M":"7M", Result.VALID_ARCHIVE),
-        new SharedSizeTestData(Region.MD, Platform.is64bit() ? "4M":"2M", Result.VALID_ARCHIVE),
-        new SharedSizeTestData(Region.MC, "120k", Result.VALID_ARCHIVE),
-    };
-
-    public static void main(String[] args) throws Exception {
-        int counter = 0;
-        for (SharedSizeTestData td : testTable) {
-            String fileName = "LimitSharedSizes" + counter + ".jsa";
-            counter++;
-
-            String option = td.optionName + "=" + td.optionValue;
-            System.out.println("testing option number <" + counter + ">");
-            System.out.println("testing option <" + option + ">");
-
-            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-               "-XX:+UnlockDiagnosticVMOptions",
-               "-XX:SharedArchiveFile=./" + fileName,
-               option,
-               "-Xshare:dump");
-
-            OutputAnalyzer output = CDSTestUtils.executeAndLog(pb, "dump" + counter);
-
-            switch (td.getResult()) {
-                case VALID:
-                case VALID_ARCHIVE:
-                {
-                  output.shouldNotContain("space is not large enough");
-                  output.shouldHaveExitValue(0);
-
-                  if (td.getResult() == Result.VALID_ARCHIVE) {
-                      // try to use the archive
-                      pb = ProcessTools.createJavaProcessBuilder(
-                         "-XX:+UnlockDiagnosticVMOptions",
-                         "-XX:SharedArchiveFile=./" + fileName,
-                         "-XX:+PrintSharedArchiveAndExit",
-                         "-version");
-
-                      output = CDSTestUtils.executeAndLog(pb, "use" + counter);
-                      if(CDSTestUtils.isUnableToMap(output)) {
-                          System.out.println("Unable to use shared archive: " +
-                                             "test not executed; assumed passed");
-                          continue;
-                      } else {
-                          output.shouldHaveExitValue(0);
-                      }
-                  }
-                }
-                break;
-                case TOO_SMALL:
-                {
-                    output.shouldContain("space is not large enough");
-                    output.shouldHaveExitValue(2);
-                }
-                break;
-                case OUT_OF_RANGE:
-                {
-                    output.shouldContain("outside the allowed range");
-                    output.shouldHaveExitValue(1);
-                }
-                break;
-            }
-        }
-    }
-}
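With this change the four Shared*Size flags the removed test exercised become deprecated no-ops, so its table-driven expectations (TOO_SMALL, OUT_OF_RANGE) no longer apply. A minimal sketch of the replacement check, mirroring case (2) of the updated SpaceUtilizationCheck below (class name hypothetical; the flag values are the formerly-too-small ones from the removed table):

    import jdk.test.lib.cds.CDSTestUtils;
    import jdk.test.lib.process.OutputAnalyzer;

    public class DeprecatedSharedSizeFlags {
        public static void main(String[] args) throws Exception {
            // Formerly these values were rejected as too small; after this
            // change the flags are ignored and the dump must succeed.
            OutputAnalyzer output = CDSTestUtils.createArchive(
                "-XX:SharedReadOnlySize=4M", "-XX:SharedMiscCodeSize=20k");
            CDSTestUtils.checkDump(output); // exit 0, no "not large enough" error
        }
    }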
--- a/hotspot/test/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/test/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Wed Aug 02 18:06:38 2017 -0700
@@ -35,11 +35,9 @@
 
 public class MaxMetaspaceSize {
   public static void main(String[] args) throws Exception {
-      String msg = "is not large enough.\n" +
-          "Either don't specify the -XX:MaxMetaspaceSize=<size>\n" +
-          "or increase the size to at least";
+    String msg = "OutOfMemoryError: Metaspace";
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:MaxMetaspaceSize=10m", "-Xshare:dump");
-    CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(2);
+        "-XX:MaxMetaspaceSize=1m", "-Xshare:dump");
+    CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(1);
   }
 }
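The failure mode changes here: since the region sizes are now determined automatically, a too-small MaxMetaspaceSize presumably surfaces as a generic metaspace OOM with exit code 1, rather than the CDS-specific sizing error with exit code 2. A sketch of a check that would accept either vintage (class name hypothetical; the messages and exit codes are taken from the two versions of this test):

    import jdk.test.lib.cds.CDSTestUtils;
    import jdk.test.lib.process.OutputAnalyzer;
    import jdk.test.lib.process.ProcessTools;

    public class TinyMetaspaceDump {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:MaxMetaspaceSize=1m", "-Xshare:dump");
            OutputAnalyzer output = CDSTestUtils.executeAndLog(pb, "dump");
            try {
                output.shouldContain("OutOfMemoryError: Metaspace"); // new behavior
                output.shouldHaveExitValue(1);
            } catch (RuntimeException e) {
                output.shouldContain("is not large enough");         // old behavior
                output.shouldHaveExitValue(2);
            }
        }
    }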
--- a/hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java	Wed Aug 02 18:06:38 2017 -0700
@@ -38,6 +38,11 @@
         String s = "<init>";
         String internedS = s.intern();
 
+        // Sanity check: the string must still behave as a well-formed java.lang.String
+        if (s.getClass() != String.class || !(s instanceof String)) {
+            throw new RuntimeException("Shared string is not a valid String: FAIL");
+        }
+
         if (wb.isShared(internedS)) {
             System.out.println("Found shared string, result: PASS");
         } else {
--- a/hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Sat Jul 22 15:54:27 2017 -0400
+++ b/hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Wed Aug 02 18:06:38 2017 -0700
@@ -28,70 +28,82 @@
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @run main SpaceUtilizationCheck
+ * @build sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SpaceUtilizationCheck
  */
 
 import jdk.test.lib.cds.CDSTestUtils;
 import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.WhiteBox;
 
 import java.util.regex.Pattern;
 import java.util.regex.Matcher;
 import java.util.ArrayList;
+import java.util.Hashtable;
 import java.lang.Integer;
 
 public class SpaceUtilizationCheck {
-    // Minimum allowed utilization value (percent)
-    // The goal is to have this number to be 50% for RO and RW regions
-    // Once that feature is implemented, increase the MIN_UTILIZATION to 50
-    private static final int MIN_UTILIZATION = 30;
-
-    // Only RO and RW regions are considered for this check, since they
-    // currently account for the bulk of the shared space
-    private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;
+    // [1] Each region must have no more than
+    //     WhiteBox.metaspaceReserveAlignment() bytes of unused space.
+    // [2] There must be no gap between two consecutive regions.
 
     public static void main(String[] args) throws Exception {
-        OutputAnalyzer output = CDSTestUtils.createArchive();
-        CDSTestUtils.checkDump(output);
+        // (1) Default VM arguments
+        test();
 
-        String stdout = output.getStdout();
-        ArrayList<String> utilization = findUtilization(stdout);
+        // (2) Use the now-deprecated VM arguments. They should have no effect.
+        test("-XX:SharedReadWriteSize=128M",
+             "-XX:SharedReadOnlySize=128M",
+             "-XX:SharedMiscDataSize=128M",
+             "-XX:SharedMiscCodeSize=128M");
+    }
+
+    static void test(String... extra_options) throws Exception {
+        OutputAnalyzer output = CDSTestUtils.createArchive(extra_options);
+        CDSTestUtils.checkDump(output);
+        Pattern pattern = Pattern.compile("^(..) space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a-f]+)");
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        long reserve_alignment = wb.metaspaceReserveAlignment();
+        System.out.println("Metaspace::reserve_alignment() = " + reserve_alignment);
 
-        if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS )
-            throw new RuntimeException("The output format of sharing summary has changed");
-
-        for(String str : utilization) {
-            int value = Integer.parseInt(str);
-            if (value < MIN_UTILIZATION) {
-                System.out.println(stdout);
-                throw new RuntimeException("Utilization for one of the regions" +
-                    "is below a threshold of " + MIN_UTILIZATION + "%");
+        long last_region = -1;
+        Hashtable<String,String> checked = new Hashtable<>();
+        for (String line : output.getStdout().split("\n")) {
+            if (line.contains(" space:") && !line.contains("st space:")) {
+                Matcher matcher = pattern.matcher(line);
+                if (matcher.find()) {
+                    String name = matcher.group(1);
+                    if (name.equals("s0") || name.equals("s1")) {
+                      // String regions are listed at the end and they may not be fully occupied.
+                      break;
+                    } else {
+                      System.out.println("Checking " + name + " in : " + line);
+                      checked.put(name, name);
+                    }
+                    long used = Long.parseLong(matcher.group(2));
+                    long capacity = Long.parseLong(matcher.group(3));
+                    long address = Long.parseLong(matcher.group(4), 16);
+                    long unused = capacity - used;
+                    if (unused < 0) {
+                        throw new RuntimeException("Unused space (" + unused + ") less than 0");
+                    }
+                    if (unused > reserve_alignment) {
+                        // [1] Unused space must not exceed the reserve alignment
+                        throw new RuntimeException("Unused space (" + unused + ") must not exceed Metaspace::reserve_alignment() (" +
+                                                   reserve_alignment + ")");
+                    }
+                    if (last_region >= 0 && address != last_region) {
+                        // [2] Regions must be consecutive: each must start where the previous one ended
+                        throw new RuntimeException("Region 0x" + Long.toHexString(address) + " should have started at 0x" + Long.toHexString(last_region));
+                    }
+                    last_region = address + capacity;
+                }
             }
         }
+        if (checked.size() != 5) {
+          throw new RuntimeException("Must have 5 consecutive, fully utilized regions");
+        }
     }
-
-    public static ArrayList<String> findUtilization(String input) {
-        ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
-        return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
-    }
-
-    private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
-        ArrayList<String> result = new ArrayList<String>();
-        for (String str : input) {
-            Matcher matcher = Pattern.compile(pattern).matcher(str);
-            if (matcher.find()) {
-                result.add(matcher.group());
-            }
-        }
-        return result;
-    }
-
-    private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
-        ArrayList<String> result = new ArrayList<String>();
-        for (String str : inputLines) {
-            if (str.contains("ro space:") || str.contains("rw space:")) {
-                result.add(str);
-            }
-        }
-        return result;
-    }
 }
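To make the parsing step above concrete, here is a standalone sketch of what the regex extracts from one region line of the dump output. The sample line is fabricated to match the shape the pattern expects, not copied from real -Xshare:dump output, and the class name is hypothetical:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RegionLineParse {
        public static void main(String[] args) {
            Pattern pattern = Pattern.compile(
                "^(..) space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a-f]+)");
            Matcher matcher = pattern.matcher(
                "rw space: 4096 [100.0% used] out of 4096 bytes [...] at 0x0000000800000000");
            if (matcher.find()) {
                String name   = matcher.group(1);                     // region name, e.g. "rw"
                long used     = Long.parseLong(matcher.group(2));     // bytes in use
                long capacity = Long.parseLong(matcher.group(3));     // region capacity in bytes
                long base     = Long.parseLong(matcher.group(4), 16); // region base address
                System.out.printf("%s: %d/%d bytes at 0x%x%n", name, used, capacity, base);
            }
        }
    }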