src/hotspot/share/memory/metaspaceShared.cpp
changeset 59070 22ee476cc664
parent 59056 15936b142f86
child 59282 f5f129bfa403
--- a/src/hotspot/share/memory/metaspaceShared.cpp	59069:e0d59f0c2b7d
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	59070:22ee476cc664
@@ -39,18 +39,19 @@
 #include "gc/shared/softRefPolicy.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveUtils.inline.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/methodData.hpp"
@@ -65,11 +66,11 @@
 #include "runtime/signature.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"
 #include "utilities/align.hpp"
-#include "utilities/bitMap.hpp"
+#include "utilities/bitMap.inline.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif
@@ -80,12 +81,12 @@
 bool MetaspaceShared::_has_error_classes;
 bool MetaspaceShared::_archive_loading_failed = false;
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
-size_t MetaspaceShared::_core_spaces_size = 0;
 void* MetaspaceShared::_shared_metaspace_static_top = NULL;
+intx MetaspaceShared::_relocation_delta;
 
 // The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
 //     rw  - read-write metadata
 //     ro  - read-only metadata and read-only tables
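For orientation, here is a minimal, hypothetical sketch of the bump-pointer behavior the dump regions above share; BumpRegion and its backing buffer are invented for illustration (the real DumpRegion also commits memory on demand and enforces word alignment):

#include <cassert>
#include <cstddef>

struct BumpRegion {
  char* _base;  // first byte of the region
  char* _top;   // next free byte
  char* _end;   // one past the last usable byte

  char* allocate(size_t num_bytes) {
    assert(_top + num_bytes <= _end && "out of space");
    char* p = _top;     // hand out the current top...
    _top += num_bytes;  // ...and bump it past the new allocation
    return p;
  }
  size_t used() const { return (size_t)(_top - _base); }
};

int main() {
  static char backing[1024];
  BumpRegion ro = { backing, backing, backing + sizeof(backing) };
  (void)ro.allocate(128);  // analogous to MetaspaceShared::read_only_space_alloc(128)
  assert(ro.used() == 128);
  return 0;
}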
@@ -145,13 +146,25 @@
   expand_top_to(newtop);
   memset(p, 0, newtop - p);
   return p;
 }
 
+void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
+  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+  intptr_t *p = (intptr_t*)_top;
+  char* newtop = _top + sizeof(intptr_t);
+  expand_top_to(newtop);
+  *p = n;
+  if (need_to_mark) {
+    ArchivePtrMarker::mark_pointer(p);
+  }
+}
+
 void DumpRegion::print(size_t total_bytes) const {
   tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
+                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
+                p2i(_base + MetaspaceShared::final_delta()));
 }
 
 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
   tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
              _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
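The new append_intptr_t(n, need_to_mark) overload above is what lets every pointer-bearing word written into the archive be recorded for later relocation. A small, self-contained model of that idea, with invented names (MarkedBuffer is not part of HotSpot):

#include <cstdint>
#include <vector>

struct MarkedBuffer {
  std::vector<intptr_t> words;   // archive contents, one word at a time
  std::vector<bool>     is_ptr;  // parallel bitmap: true if words[i] holds a pointer

  void append(intptr_t n, bool need_to_mark) {
    words.push_back(n);
    is_ptr.push_back(need_to_mark);  // mirrors ArchivePtrMarker::mark_pointer(p)
  }

  // Relocation pass: shift only the marked, non-NULL words by `delta`.
  void relocate(intptr_t delta) {
    for (size_t i = 0; i < words.size(); i++) {
      if (is_ptr[i] && words[i] != 0) {
        words[i] += delta;
      }
    }
  }
};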
@@ -170,18 +183,18 @@
     next->_base = next->_top = this->_end;
     next->_end = MetaspaceShared::shared_rs()->end();
   }
 }
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
+static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 
 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
   // Start with 0 committed bytes. The memory will be committed as needed by
   // MetaspaceShared::commit_shared_space_to().
   if (!_shared_vs.initialize(_shared_rs, 0)) {
-    vm_exit_during_initialization("Unable to allocate memory for shared space");
+    fatal("Unable to allocate memory for shared space");
   }
   first_space->init(&_shared_rs, (char*)first_space_bottom);
 }
 
 DumpRegion* MetaspaceShared::misc_code_dump_space() {
@@ -207,77 +220,36 @@
 
 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
   return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
-  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
-
-  // If using shared space, open the file that contains the shared space
-  // and map in the memory before initializing the rest of metaspace (so
-  // the addresses don't conflict)
-  FileMapInfo* mapinfo = new FileMapInfo(true);
-
-  // Open the shared archive file, read and validate the header. If
-  // initialization fails, shared spaces [UseSharedSpaces] are
-  // disabled and the file is closed.
-  // Map in spaces now also
-  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
-    size_t cds_total = core_spaces_size();
-    address cds_address = (address)mapinfo->region_addr(0);
-    char* cds_end = (char *)align_up(cds_address + cds_total,
-                                     Metaspace::reserve_alignment());
-
-    // Mapping the dynamic archive before allocating the class space
-    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);
-
-#ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // If UseCompressedClassPointers is set then allocate the metaspace area
-      // above the heap and above the CDS area (if it exists).
-      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-      // map_heap_regions() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      mapinfo->map_heap_regions();
-    }
-    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
-#endif // _LP64
+// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
+// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
+//                                    while keeping the first range at offset 0 of this range.
+// Metaspace::reserve_alignment()  -- so we can pass the region to
+//                                    Metaspace::allocate_metaspace_compressed_klass_ptrs.
+size_t MetaspaceShared::reserved_space_alignment() {
+  size_t os_align = os::vm_allocation_granularity();
+  size_t ms_align = Metaspace::reserve_alignment();
+  if (os_align >= ms_align) {
+    assert(os_align % ms_align == 0, "must be a multiple");
+    return os_align;
   } else {
-    assert(!mapinfo->is_open() && !UseSharedSpaces,
-           "archive file not closed or shared spaces not disabled.");
+    assert(ms_align % os_align == 0, "must be a multiple");
+    return ms_align;
   }
 }
 
-char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
-        char* static_start, char* static_end) {
-  assert(UseSharedSpaces, "must be runtime");
-  char* cds_end = static_end;
-  if (!DynamicDumpSharedSpaces) {
-    address dynamic_top = DynamicArchive::map();
-    if (dynamic_top != NULL) {
-      assert(dynamic_top > (address)static_start, "Unexpected layout");
-      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
-      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
-    }
-  }
-  return cds_end;
-}
-
-ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
-                                                  bool large, char* requested_address) {
-  if (requested_address != NULL) {
-    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
-  } else {
-    _shared_rs = ReservedSpace(size, alignment, large);
-  }
-  return &_shared_rs;
+ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
+  bool large_pages = false; // Don't use large pages for the CDS archive.
+  assert(is_aligned(requested_address, reserved_space_alignment()), "must be");
+  return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address);
 }
 
 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
   assert(DumpSharedSpaces, "should be called for dump time only");
-  const size_t reserve_alignment = Metaspace::reserve_alignment();
-  bool large_pages = false; // No large pages when dumping the CDS archive.
+  const size_t reserve_alignment = reserved_space_alignment();
   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 
 #ifdef _LP64
   // On 64-bit VM, the heap and class space layout will be the same as if
   // you're running in -Xshare:on mode:
@@ -294,19 +266,26 @@
 #else
   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
   size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
+  bool use_requested_base = true;
+  if (ArchiveRelocationMode == 1) {
+    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
+    use_requested_base = false;
+  }
+
   // First try to reserve the space at the specified SharedBaseAddress.
-  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
-  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
+  assert(!_shared_rs.is_reserved(), "must be");
+  if (use_requested_base) {
+    _shared_rs = reserve_shared_space(cds_total, shared_base);
+  }
   if (_shared_rs.is_reserved()) {
     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
   } else {
     // Get a mmap region anywhere if the SharedBaseAddress fails.
-    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
-    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
+    _shared_rs = reserve_shared_space(cds_total);
   }
   if (!_shared_rs.is_reserved()) {
     vm_exit_during_initialization("Unable to reserve memory for shared space",
                                   err_msg(SIZE_FORMAT " bytes.", cds_total));
   }
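The reserved_space_alignment() logic introduced above reduces to "use the coarser of the two alignments, and require that one divides the other". A standalone sketch of that computation (reconcile_alignments is an invented name; the divisibility assumption holds when both values are powers of two, as they are in practice):

#include <cassert>
#include <cstddef>

static size_t reconcile_alignments(size_t os_align, size_t ms_align) {
  if (os_align >= ms_align) {
    assert(os_align % ms_align == 0 && "must be a multiple");
    return os_align;  // coarser OS granularity satisfies both constraints
  } else {
    assert(ms_align % os_align == 0 && "must be a multiple");
    return ms_align;  // coarser metaspace alignment satisfies both constraints
  }
}
// e.g. reconcile_alignments(64 * 1024, 4 * 1024) == 64 * 1024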
@@ -440,17 +419,23 @@
   size_t commit = MAX2(min_bytes, preferred_bytes);
   commit = MIN2(commit, uncommitted);
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
+  ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());
+
   if (!result) {
     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                           need_committed_size));
   }
 
   log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
                 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
+}
+
+void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
+  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
 }
 
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
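The commit-size computation above is a clamp: commit at least min_bytes, preferably preferred_bytes, but never more than what remains uncommitted in the reservation. A plain-C++ restatement (std::max/std::min stand in for HotSpot's MAX2/MIN2; the new ArchivePtrMarker::expand_ptr_end call then keeps the pointer bitmap's view in sync with the grown space):

#include <algorithm>
#include <cstddef>

static size_t commit_amount(size_t min_bytes, size_t preferred_bytes, size_t uncommitted) {
  size_t commit = std::max(min_bytes, preferred_bytes);  // MAX2: at least what is needed
  commit = std::min(commit, uncommitted);                // MIN2: never past the reservation
  return commit;
}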
@@ -467,10 +452,11 @@
   soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
   soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(Symbol));
 
   // Dump/restore miscellaneous metadata.
+  JavaClasses::serialize_offsets(soc);
   Universe::serialize(soc);
   soc->do_tag(--tag);
 
   // Dump/restore references to commonly used names and signatures.
   vmSymbols::serialize(soc);
@@ -480,11 +466,10 @@
   SymbolTable::serialize_shared_table_header(soc);
   StringTable::serialize_shared_table_header(soc);
   HeapShared::serialize_subgraph_info_table_header(soc);
   SystemDictionaryShared::serialize_dictionary_headers(soc);
 
-  JavaClasses::serialize_offsets(soc);
   InstanceMirrorKlass::serialize_offsets(soc);
   soc->do_tag(--tag);
 
   serialize_cloned_cpp_vtptrs(soc);
   soc->do_tag(--tag);
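The do_tag() calls woven through the serializer above implement a cheap consistency check: the dump writes known values at known points, and the restore path recomputes and verifies them, catching any drift between dump-time and run-time layouts. A hypothetical, minimal model of that convention (TagWriter/TagReader are invented types):

#include <cassert>
#include <cstdint>
#include <vector>

struct TagWriter {
  std::vector<intptr_t> out;
  void do_tag(int tag) { out.push_back(tag); }  // record the expected value
};

struct TagReader {
  const std::vector<intptr_t>& in;
  size_t pos = 0;
  void do_tag(int tag) {
    intptr_t got = in[pos++];
    assert(got == tag && "archive/runtime layout mismatch");  // verify, do not skip
    (void)got;
  }
};

int main() {
  TagWriter w;
  w.do_tag((int)sizeof(void*));  // dump time: write the tag
  TagReader r{w.out};
  r.do_tag((int)sizeof(void*));  // restore time: verify the same tag
  return 0;
}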
@@ -703,11 +688,13 @@
   }
 
   // Switch the vtable pointer to point to the cloned vtable.
   static void patch(Metadata* obj) {
     assert(DumpSharedSpaces, "dump-time only");
+    assert(MetaspaceShared::is_in_output_space(obj), "must be");
     *(void**)obj = (void*)(_info->cloned_vtable());
+    ArchivePtrMarker::mark_pointer(obj);
   }
 
   static bool is_valid_shared_object(const T* obj) {
     intptr_t* vptr = *(intptr_t**)obj;
     return vptr == _info->cloned_vtable();
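The patch() above relies on the fact that, on the toolchains HotSpot supports, the first word of a polymorphic C++ object is its vtable pointer, so rewriting that one word redirects virtual dispatch to the cloned, archive-local vtable. A sketch of the shape of that operation (compiler-specific behavior, not portable C++; shown only for illustration):

struct Metadata {
  virtual void dummy() {}  // polymorphic type: its first word is the vptr
};

static void patch_vptr(Metadata* obj, void* cloned_vtable) {
  // Same shape as: *(void**)obj = (void*)(_info->cloned_vtable());
  *(void**)obj = cloned_vtable;
}

static bool is_patched(const Metadata* obj, const void* cloned_vtable) {
  // Same shape as is_valid_shared_object(): compare the object's first word.
  return *(void* const*)obj == cloned_vtable;
}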
@@ -797,11 +784,12 @@
 
   return vtable_len;
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);
+  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
+  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
 
 #define ZERO_CPP_VTABLE(c) \
@@ -963,11 +951,11 @@
 void WriteClosure::do_region(u_char* start, size_t size) {
   assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
   assert(size % sizeof(intptr_t) == 0, "bad size");
   do_tag((int)size);
   while (size > 0) {
-    _dump_region->append_intptr_t(*(intptr_t*)start);
+    _dump_region->append_intptr_t(*(intptr_t*)start, true);
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
 }
 
@@ -1127,13 +1115,17 @@
   void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
   void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                  GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
   void dump_symbols();
   char* dump_read_only_tables();
+  void print_class_stats();
   void print_region_stats();
+  void print_bitmap_region_stats(size_t size, size_t total_size);
   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                               const char *name, const size_t total_size);
+                               const char *name, size_t total_size);
+  void relocate_to_default_base_address(CHeapBitMap* ptrmap);
+
 public:
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
   void doit();   // outline because gdb sucks
   static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {
@@ -1274,18 +1266,28 @@
       address new_loc = get_new_loc(ref);
       RefRelocator refer;
       ref->metaspace_pointers_do_at(&refer, new_loc);
       return true; // recurse into ref.obj()
     }
+    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
+      assert(type == _method_entry_ref, "only special type allowed for now");
+      address obj = ref->obj();
+      address new_obj = get_new_loc(ref);
+      size_t offset = pointer_delta(p, obj,  sizeof(u1));
+      intptr_t* new_p = (intptr_t*)(new_obj + offset);
+      assert(*p == *new_p, "must be a copy");
+      ArchivePtrMarker::mark_pointer((address*)new_p);
+    }
   };
 
   // Relocate a reference to point to its shallow copy
   class RefRelocator: public MetaspaceClosure {
   public:
     virtual bool do_ref(Ref* ref, bool read_only) {
       if (ref->not_null()) {
         ref->update(get_new_loc(ref));
+        ArchivePtrMarker::mark_pointer(ref->addr());
       }
       return false; // Do not recurse.
     }
   };
 
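push_special() above marks a slot that lives inside a copied object rather than at a fixed field offset known to the closure. The arithmetic is simply "same byte offset in the copy"; a simplified stand-alone version (names invented for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>

static intptr_t* corresponding_slot(char* obj, char* new_obj, intptr_t* p) {
  size_t offset = (size_t)((char*)p - obj);         // byte offset of the slot in the original
  intptr_t* new_p = (intptr_t*)(new_obj + offset);  // the same slot inside the shallow copy
  assert(*p == *new_p && "must be a copy");         // copying preserved the contents
  return new_p;                                     // this is the slot that gets marked
}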
@@ -1438,42 +1440,11 @@
   dump_archive_heap_oopmaps();
 
   return start;
 }
 
-void VM_PopulateDumpSharedSpace::doit() {
-  // We should no longer allocate anything from the metaspace, so that:
-  //
-  // (1) Metaspace::allocate might trigger GC if we have run out of
-  //     committed metaspace, but we can't GC because we're running
-  //     in the VM thread.
-  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
-  Metaspace::freeze();
-  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
-
-  Thread* THREAD = VMThread::vm_thread();
-
-  FileMapInfo::check_nonempty_dir_in_shared_path_table();
-
-  NOT_PRODUCT(SystemDictionary::verify();)
-  // The following guarantee is meant to ensure that no loader constraints
-  // exist yet, since the constraints table is not shared.  This becomes
-  // more important now that we don't re-initialize vtables/itables for
-  // shared classes at runtime, where constraints were previously created.
-  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
-            "loader constraints are not saved");
-  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
-          "placeholders are not saved");
-
-  // At this point, many classes have been loaded.
-  // Gather systemDictionary classes in a global array and do everything to
-  // that so we don't have to walk the SystemDictionary again.
-  SystemDictionaryShared::check_excluded_classes();
-  _global_klass_objects = new GrowableArray<Klass*>(1000);
-  CollectClassesClosure collect_classes;
-  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
-
+void VM_PopulateDumpSharedSpace::print_class_stats() {
   tty->print_cr("Number of classes %d", _global_klass_objects->length());
   {
     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
     for (int i = 0; i < _global_klass_objects->length(); i++) {
       Klass* k = _global_klass_objects->at(i);
@@ -1488,10 +1459,88 @@
     }
     tty->print_cr("    instance classes   = %5d", num_inst);
     tty->print_cr("    obj array classes  = %5d", num_obj_array);
     tty->print_cr("    type array classes = %5d", num_type_array);
   }
+}
+
+void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) {
+  intx addr_delta = MetaspaceShared::final_delta();
+  if (addr_delta == 0) {
+    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top());
+  } else {
+    // We are not able to reserve space at Arguments::default_SharedBaseAddress() (due to ASLR).
+    // This means that the current content of the archive is based on a random
+    // address. Let's relocate all the pointers, so that it can be mapped to
+    // Arguments::default_SharedBaseAddress() without runtime relocation.
+    //
+    // Note: both the base and dynamic archive are written with
+    // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress()
+
+    // Patch all pointers that are marked by ptrmap within this region,
+    // where we have just dumped all the metaspace data.
+    address patch_base = (address)SharedBaseAddress;
+    address patch_end  = (address)_md_region.top();
+    size_t size = patch_end - patch_base;
+
+    // the current value of the pointers to be patched must be within this
+    // range (i.e., must point to valid metaspace objects)
+    address valid_old_base = patch_base;
+    address valid_old_end  = patch_end;
+
+    // after patching, the pointers must point inside this range
+    // (the requested location of the archive, as mapped at runtime).
+    address valid_new_base = (address)Arguments::default_SharedBaseAddress();
+    address valid_new_end  = valid_new_base + size;
+
+    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
+                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
+                   p2i(valid_new_base), p2i(valid_new_end));
+
+    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
+    ptrmap->iterate(&patcher);
+    ArchivePtrMarker::compact(patcher.max_non_null_offset());
+  }
+}
+
+void VM_PopulateDumpSharedSpace::doit() {
+  CHeapBitMap ptrmap;
+  MetaspaceShared::initialize_ptr_marker(&ptrmap);
+
+  // We should no longer allocate anything from the metaspace, so that:
+  //
+  // (1) Metaspace::allocate might trigger GC if we have run out of
+  //     committed metaspace, but we can't GC because we're running
+  //     in the VM thread.
+  // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
+  Metaspace::freeze();
+  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
+
+  Thread* THREAD = VMThread::vm_thread();
+
+  FileMapInfo::check_nonempty_dir_in_shared_path_table();
+
+  NOT_PRODUCT(SystemDictionary::verify();)
+  // The following guarantee is meant to ensure that no loader constraints
+  // exist yet, since the constraints table is not shared.  This becomes
+  // more important now that we don't re-initialize vtables/itables for
+  // shared classes at runtime, where constraints were previously created.
+  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
+            "loader constraints are not saved");
+  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
+          "placeholders are not saved");
+
+  // At this point, many classes have been loaded.
+  // Gather systemDictionary classes in a global array and do everything to
+  // that so we don't have to walk the SystemDictionary again.
+  SystemDictionaryShared::check_excluded_classes();
+  _global_klass_objects = new GrowableArray<Klass*>(1000);
+  CollectClassesClosure collect_classes;
+  ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
+
+  print_class_stats();
 
   // Ensure the ConstMethods won't be modified at run-time
   tty->print("Updating ConstMethods ... ");
   rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
   tty->print_cr("done. ");
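relocate_to_default_base_address() walks only the slots recorded in the pointer bitmap and shifts each by addr_delta, so the written archive is valid at Arguments::default_SharedBaseAddress() without run-time patching. A minimal model of that pass, assuming (for simplicity) the marked slots are collected in a vector rather than a bitmap:

#include <cassert>
#include <cstdint>
#include <vector>

static void rebase_marked_slots(std::vector<intptr_t*>& marked_slots,
                                intptr_t old_base, intptr_t old_end,
                                intptr_t addr_delta) {
  for (intptr_t* slot : marked_slots) {
    intptr_t v = *slot;
    if (v == 0) continue;                   // NULL stays NULL
    assert(old_base <= v && v < old_end);   // must point at valid metaspace data
    *slot = v + addr_delta;                 // now valid at the requested base address
  }
}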
@@ -1518,41 +1567,40 @@
 
   char* vtbl_list = _md_region.top();
   MetaspaceShared::allocate_cpp_vtable_clones();
   _md_region.pack();
 
-  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so there total size
-  // is just the spaces between the two ends.
-  size_t core_spaces_size = _md_region.end() - _mc_region.base();
-  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
-         "should already be aligned");
-
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
   MetaspaceShared::patch_cpp_vtable_pointers();
 
   // The vtable clones contain addresses of the current process.
   // We don't want to write these addresses into the archive.
   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
+
+  // relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress()
+  // without runtime relocation.
+  relocate_to_default_base_address(&ptrmap);
 
   // Create and write the archive file that maps the shared spaces.
 
   FileMapInfo* mapinfo = new FileMapInfo(true);
   mapinfo->populate_header(os::vm_allocation_granularity());
   mapinfo->set_serialized_data_start(serialized_data_start);
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                       MetaspaceShared::i2i_entry_code_buffers_size());
-  mapinfo->set_core_spaces_size(core_spaces_size);
   mapinfo->open_for_write();
 
   // NOTE: md contains the trampoline code for method entries, which are patched at run time,
   // so it needs to be read/write.
   write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
   write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
+
+  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());
 
   _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                         _closed_archive_heap_regions,
                                         _closed_archive_heap_oopmaps,
                                         MetaspaceShared::first_closed_archive_heap_region,
@@ -1561,10 +1609,11 @@
                                         _open_archive_heap_regions,
                                         _open_archive_heap_oopmaps,
                                         MetaspaceShared::first_open_archive_heap_region,
                                         MetaspaceShared::max_open_archive_heap_region);
 
+  mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
   mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   mapinfo->close();
 
   // Restore the vtable in case we invoke any virtual methods.
@@ -1592,33 +1641,43 @@
   vm_direct_exit(0);
 }
 
 void VM_PopulateDumpSharedSpace::print_region_stats() {
   // Print statistics of all the regions
+  const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes();
+  const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment());
   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                 _mc_region.reserved()  + _md_region.reserved() +
+                                bitmap_reserved +
                                 _total_closed_archive_region_size +
                                 _total_open_archive_region_size;
   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                              _mc_region.used()  + _md_region.used() +
+                             bitmap_used +
                              _total_closed_archive_region_size +
                              _total_open_archive_region_size;
   const double total_u_perc = percent_of(total_bytes, total_reserved);
 
   _mc_region.print(total_reserved);
   _rw_region.print(total_reserved);
   _ro_region.print(total_reserved);
   _md_region.print(total_reserved);
+  print_bitmap_region_stats(bitmap_reserved, total_reserved);
   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
   tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                  total_bytes, total_reserved, total_u_perc);
 }
 
+void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
+  tty->print_cr("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+                size, size/double(total_size)*100.0, size, p2i(NULL));
+}
+
 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                                                         const char *name, const size_t total_size) {
+                                                         const char *name, size_t total_size) {
   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
   for (int i = 0; i < arr_len; i++) {
       char* start = (char*)heap_mem->at(i).start();
       size_t size = heap_mem->at(i).byte_size();
       char* top = start + size;
@@ -1634,13 +1693,17 @@
   assert(DumpSharedSpaces, "sanity");
   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
   o->set_klass(k);
 }
 
-Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
+Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
   assert(DumpSharedSpaces, "sanity");
-  return ArchiveCompactor::get_relocated_klass(k);
+  k = ArchiveCompactor::get_relocated_klass(k);
+  if (is_final) {
+    k = (Klass*)(address(k) + final_delta());
+  }
+  return k;
 }
 
 class LinkSharedClassesClosure : public KlassClosure {
   Thread* THREAD;
   bool    _made_progress;
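The is_final parameter added above selects between two coordinate systems: the Klass copy's address in the current dump buffer, and the address it will have once the archive is mapped at the requested base. A sketch of the final shift (to_requested_base is an invented name; final_delta plays the same role as MetaspaceShared::final_delta()):

#include <cstdint>

struct Klass; // opaque stand-in for the real Klass

static Klass* to_requested_base(Klass* copy_in_buffer, intptr_t final_delta) {
  // Same shape as: k = (Klass*)(address(k) + final_delta());
  return (Klass*)((intptr_t)copy_in_buffer + final_delta);
}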
@@ -1945,12 +2008,13 @@
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
 }
 
-void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
-  _shared_metaspace_static_top = top;
+void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
+  assert(base <= static_top && static_top <= top, "must be");
+  _shared_metaspace_static_top = static_top;
   MetaspaceObj::set_shared_metaspace_range(base, top);
 }
 
 // Return true if given address is in the misc data region
 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
  1971   } else {
  2035   } else {
  1972     return false;
  2036     return false;
  1973   }
  2037   }
  1974 }
  2038 }
  1975 
  2039 
  1976 // Map shared spaces at requested addresses and return if succeeded.
  2040 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  1977 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
  2041   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
  1978   size_t image_alignment = mapinfo->alignment();
  2042   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
  1979 
  2043   FileMapInfo* static_mapinfo = open_static_archive();
  1980 #ifndef _WINDOWS
  2044   FileMapInfo* dynamic_mapinfo = NULL;
  1981   // Map in the shared memory and then map the regions on top of it.
  2045 
  1982   // On Windows, don't map the memory here because it will cause the
  2046   if (static_mapinfo != NULL) {
  1983   // mappings of the regions to fail.
  2047     dynamic_mapinfo = open_dynamic_archive();
  1984   ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  2048 
  1985   if (!shared_rs.is_reserved()) return false;
  2049     // First try to map at the requested address
  1986 #endif
  2050     result = map_archives(static_mapinfo, dynamic_mapinfo, true);
  1987 
  2051     if (result == MAP_ARCHIVE_MMAP_FAILURE) {
  1988   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
  2052       // Mapping has failed (probably due to ASLR). Let's map at an address chosen
  1989 
  2053       // by the OS.
  1990   // Map each shared region
  2054       result = map_archives(static_mapinfo, dynamic_mapinfo, false);
  1991   int regions[] = {mc, rw, ro, md};
  2055     }
  1992   size_t len = sizeof(regions)/sizeof(int);
  2056   }
  1993   char* saved_base[] = {NULL, NULL, NULL, NULL};
  2057 
  1994   char* top = mapinfo->map_regions(regions, saved_base, len );
  2058   if (result == MAP_ARCHIVE_SUCCESS) {
  1995 
  2059     bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
  1996   if (top != NULL &&
  2060     char* cds_base = static_mapinfo->mapped_base();
  1997       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
  2061     char* cds_end =  dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
  1998       mapinfo->validate_shared_path_table()) {
  2062     set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
  1999     // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
  2063     _relocation_delta = static_mapinfo->relocation_delta();
  2000     // fast checking in MetaspaceShared::is_in_shared_metaspace() and
  2064     if (dynamic_mapped) {
  2001     // MetaspaceObj::is_shared().
  2065       FileMapInfo::set_shared_path_table(dynamic_mapinfo);
  2002     _core_spaces_size = mapinfo->core_spaces_size();
  2066     } else {
  2003     set_shared_metaspace_range((void*)saved_base[0], (void*)top);
  2067       FileMapInfo::set_shared_path_table(static_mapinfo);
  2004     return true;
  2068     }
  2005   } else {
  2069   } else {
  2006     mapinfo->unmap_regions(regions, saved_base, len);
  2070     set_shared_metaspace_range(NULL, NULL, NULL);
  2007 #ifndef _WINDOWS
  2071     UseSharedSpaces = false;
  2008     // Release the entire mapped region
  2072     FileMapInfo::fail_continue("Unable to map shared spaces");
  2009     shared_rs.release();
  2073     if (PrintSharedArchiveAndExit) {
  2010 #endif
  2074       vm_exit_during_initialization("Unable to use shared archive.");
  2011     // If -Xshare:on is specified, print out the error message and exit VM,
  2075     }
  2012     // otherwise, set UseSharedSpaces to false and continue.
  2076   }
  2013     if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
  2077 
  2014       vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
  2078   if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
       
  2079     delete static_mapinfo;
       
  2080   }
       
  2081   if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
       
  2082     delete dynamic_mapinfo;
       
  2083   }
       
  2084 }
       
  2085 
       
  2086 FileMapInfo* MetaspaceShared::open_static_archive() {
       
  2087   FileMapInfo* mapinfo = new FileMapInfo(true);
       
  2088   if (!mapinfo->initialize()) {
       
  2089     delete(mapinfo);
       
  2090     return NULL;
       
  2091   }
       
  2092   return mapinfo;
       
  2093 }
       
  2094 
       
  2095 FileMapInfo* MetaspaceShared::open_dynamic_archive() {
       
  2096   if (DynamicDumpSharedSpaces) {
       
  2097     return NULL;
       
  2098   }
       
  2099   if (Arguments::GetSharedDynamicArchivePath() == NULL) {
       
  2100     return NULL;
       
  2101   }
       
  2102 
       
  2103   FileMapInfo* mapinfo = new FileMapInfo(false);
       
  2104   if (!mapinfo->initialize()) {
       
  2105     delete(mapinfo);
       
  2106     return NULL;
       
  2107   }
       
  2108   return mapinfo;
       
  2109 }
       
  2110 
       
  2111 // use_requested_addr:
       
  2112 //  true  = map at FileMapHeader::_requested_base_address
       
  2113 //  false = map at an alternative address picked by OS.
       
  2114 MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
       
  2115                                                bool use_requested_addr) {
       
  2116   PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
       
  2117       // For product build only -- this is for benchmarking the cost of doing relocation.
       
  2118       // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage.
       
  2119       log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
       
  2120       return MAP_ARCHIVE_MMAP_FAILURE;
       
  2121     });
       
  2122 
       
  2123   if (ArchiveRelocationMode == 2 && !use_requested_addr) {
       
  2124     log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
       
  2125     return MAP_ARCHIVE_MMAP_FAILURE;
       
  2126   };
       
  2127 
       
  2128   if (dynamic_mapinfo != NULL) {
       
  2129     // Ensure that the OS won't be able to allocate new memory spaces between the two
       
  2130     // archives, or else it would mess up the simple comparision in MetaspaceObj::is_shared().
       
  2131     assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
       
  2132   }
       
  2133 
       
  2134   ReservedSpace main_rs, archive_space_rs, class_space_rs;
       
  2135   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
       
  2136   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
       
  2137                                                                  use_requested_addr, main_rs, archive_space_rs,
       
  2138                                                                  class_space_rs);
       
  2139   if (mapped_base_address == NULL) {
       
  2140     result = MAP_ARCHIVE_MMAP_FAILURE;
       
  2141   } else {
       
  2142     log_debug(cds)("Reserved archive_space_rs     [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
       
  2143                    p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
       
  2144     log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
       
  2145                    p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
       
  2146     MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
       
  2147     MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
       
  2148                                      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
       
  2149 
       
  2150     if (static_result == MAP_ARCHIVE_SUCCESS) {
       
  2151       if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
       
  2152         result = MAP_ARCHIVE_SUCCESS;
       
  2153       } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
       
  2154         assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
       
  2155         // No need to retry mapping the dynamic archive again, as it will never succeed
       
  2156         // (bad file, etc) -- just keep the base archive.
       
  2157         log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
       
  2158                                   dynamic_mapinfo->full_path());
       
  2159         result = MAP_ARCHIVE_SUCCESS;
       
  2160         // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no
       
  2161         // easy API to do that right now.
       
  2162       } else {
       
  2163         result = MAP_ARCHIVE_MMAP_FAILURE;
       
  2164       }
       
  2165     } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
       
  2166       result = MAP_ARCHIVE_OTHER_FAILURE;
  2015     } else {
  2167     } else {
  2016       FLAG_SET_DEFAULT(UseSharedSpaces, false);
  2168       result = MAP_ARCHIVE_MMAP_FAILURE;
  2017     }
  2169     }
  2018     return false;
  2170   }
       
  2171 
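The nested branches above amount to a small decision table: an MMAP failure in either archive fails the whole attempt (so it can be retried at another address), while a broken top archive is dropped and the base archive kept. A standalone model of that table (combine() is illustrative, not a HotSpot function):

#include <cstdio>

enum MapResult { SUCCESS, MMAP_FAILURE, OTHER_FAILURE };

// Mirrors the branches above: the static result dominates, and a dynamic
// OTHER_FAILURE (bad top archive) degrades to base-archive-only success.
static MapResult combine(MapResult statik, MapResult dynamic) {
  if (statik != SUCCESS) {
    return statik == OTHER_FAILURE ? OTHER_FAILURE : MMAP_FAILURE;
  }
  if (dynamic == SUCCESS || dynamic == OTHER_FAILURE) {
    return SUCCESS;                  // OTHER_FAILURE: keep base archive only
  }
  return MMAP_FAILURE;
}

int main() {
  printf("%d\n", combine(SUCCESS, OTHER_FAILURE));  // prints 0 (SUCCESS)
  return 0;
}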
       
  2172   if (result == MAP_ARCHIVE_SUCCESS) {
       
  2173     if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
       
  2174       MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
       
  2175     }
       
  2176     SharedBaseAddress = (size_t)mapped_base_address;
       
  2177     LP64_ONLY({
       
  2178         if (Metaspace::using_class_space()) {
       
  2179           assert(class_space_rs.is_reserved(), "must be");
       
  2180           char* cds_base = static_mapinfo->mapped_base();
       
  2181           Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
       
  2182           // map_heap_regions() compares the current narrow oop and klass encodings
       
  2183           // with the archived ones, so it must be done after all encodings are determined.
       
  2184           static_mapinfo->map_heap_regions();
       
  2185         }
       
  2186         CompressedKlassPointers::set_range(CompressedClassSpaceSize);
       
  2187       });
       
  2188   } else {
       
  2189     unmap_archive(static_mapinfo);
       
  2190     unmap_archive(dynamic_mapinfo);
       
  2191     release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
       
  2192   }
       
  2193 
       
  2194   return result;
       
  2195 }
       
  2196 
       
  2197 char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
       
  2198                                                           FileMapInfo* dynamic_mapinfo,
       
  2199                                                           bool use_requested_addr,
       
  2200                                                           ReservedSpace& main_rs,
       
  2201                                                           ReservedSpace& archive_space_rs,
       
  2202                                                           ReservedSpace& class_space_rs) {
       
  2203   const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
       
  2204   const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());
       
  2205 
       
  2206   if (use_klass_space) {
       
  2207     assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
       
  2208   }
       
  2209   if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
       
  2210     return NULL;
       
  2211   }
       
  2212 
       
  2213   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
       
  2214   size_t base_offset = static_mapinfo->mapping_base_offset();
       
  2215   size_t end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
       
  2216   assert(base_offset == 0, "must be");
       
  2217   assert(is_aligned(end_offset,  os::vm_allocation_granularity()), "must be");
       
  2218   assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");
       
  2219 
       
  2220   // In case reserved_space_alignment() != os::vm_allocation_granularity()
       
  2221   assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
       
  2222   end_offset = align_up(end_offset, reserved_space_alignment());
       
  2223 
       
  2224   size_t archive_space_size = end_offset - base_offset;
       
  2225 
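align_up() above comes from utilities/align.hpp. For reference, the usual power-of-two rounding it performs looks like this (align_up_sketch is a stand-in, not the HotSpot implementation):

#include <cstddef>
#include <cstdio>

// Stand-in for align_up(): round value up to the next multiple of a
// power-of-two alignment, as done for end_offset above.
static size_t align_up_sketch(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  // e.g. an end offset of 0x12345 with a 64 KB reservation alignment
  printf("0x%zx\n", align_up_sketch(0x12345, 0x10000));  // prints 0x20000
  return 0;
}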
       
  2226   // Special handling for Windows because it cannot mmap into a reserved space:
       
  2227   //    use_requested_addr: We just map each region individually, and give up if any one of them fails.
       
  2228   //   !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
       
  2229   //                        We're going to patch all the pointers anyway, so there's no benefit to mmap.
       
  2230 
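A compact model of the strategy choice in the comment above (the enum and chooser are illustrative, not HotSpot types):

#include <cstdio>

// On Windows an mmap cannot land inside an existing reservation, so the
// archive is either mapped region by region at exact addresses, or
// reserved as one block and filled with read() plus pointer patching.
enum Strategy { MMAP_INTO_RESERVATION, MAP_EACH_REGION_EXACTLY, RESERVE_THEN_READ };

static Strategy choose(bool windows_memory_mapping, bool use_requested_addr) {
  if (!windows_memory_mapping) return MMAP_INTO_RESERVATION;
  return use_requested_addr ? MAP_EACH_REGION_EXACTLY : RESERVE_THEN_READ;
}

int main() {
  printf("%d\n", choose(true, false));  // 2: reserve, read, patch pointers
  return 0;
}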
       
  2231   if (use_requested_addr) {
       
  2232     char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
       
  2233     char* archive_space_end  = archive_space_base + archive_space_size;
       
  2234     if (!MetaspaceShared::use_windows_memory_mapping()) {
       
  2235       archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
       
  2236       if (!archive_space_rs.is_reserved()) {
       
  2237         return NULL;
       
  2238       }
       
  2239     }
       
  2240     if (use_klass_space) {
       
  2241       // Make sure we can map the klass space immediately following the archive space
       
  2242       char* class_space_base = archive_space_end;
       
  2243       class_space_rs = reserve_shared_space(class_space_size, class_space_base);
       
  2244       if (!class_space_rs.is_reserved()) {
       
  2245         return NULL;
       
  2246       }
       
  2247     }
       
  2248     return static_mapinfo->requested_base_address();
       
  2249   } else {
       
  2250     if (use_klass_space) {
       
  2251       main_rs = reserve_shared_space(archive_space_size + class_space_size);
       
  2252       if (main_rs.is_reserved()) {
       
  2253         archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
       
  2254         class_space_rs = main_rs.last_part(archive_space_size);
       
  2255       }
       
  2256     } else {
       
  2257       main_rs = reserve_shared_space(archive_space_size);
       
  2258       archive_space_rs = main_rs;
       
  2259     }
       
  2260     if (archive_space_rs.is_reserved()) {
       
  2261       return archive_space_rs.base();
       
  2262     } else {
       
  2263       return NULL;
       
  2264     }
       
  2265   }
       
  2266 }
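In the !use_requested_addr branch, a single reservation is carved into the archive part and the class-space part via first_part()/last_part(). A self-contained model of that split (Range is a stand-in for ReservedSpace; the sizes are made up):

#include <cstddef>
#include <cstdio>

// Stand-in for ReservedSpace: main_rs is split into archive_space_rs (the
// first archive_space_size bytes) and class_space_rs (the remainder).
struct Range { char* base; size_t size; };

int main() {
  static char block[0x6000];                       // pretend reservation
  Range main_rs = { block, sizeof(block) };
  size_t archive_space_size = 0x4000;
  Range archive_space_rs = { main_rs.base, archive_space_size };
  Range class_space_rs   = { main_rs.base + archive_space_size,
                             main_rs.size - archive_space_size };
  printf("archive=%p+0x%zx classes=%p+0x%zx\n",
         (void*)archive_space_rs.base, archive_space_rs.size,
         (void*)class_space_rs.base,   class_space_rs.size);
  return 0;
}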
       
  2267 
       
  2268 void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
       
  2269                                               ReservedSpace& archive_space_rs,
       
  2270                                               ReservedSpace& class_space_rs) {
       
  2271   if (main_rs.is_reserved()) {
       
  2272     assert(main_rs.contains(archive_space_rs.base()), "must be");
       
  2273     assert(main_rs.contains(class_space_rs.base()), "must be");
       
  2274     log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
       
  2275     main_rs.release();
       
  2276   } else {
       
  2277     if (archive_space_rs.is_reserved()) {
       
  2278       log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
       
  2279       archive_space_rs.release();
       
  2280     }
       
  2281     if (class_space_rs.is_reserved()) {
       
  2282       log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
       
  2283       class_space_rs.release();
       
  2284     }
       
  2285   }
       
  2286 }
       
  2287 
       
  2288 static int static_regions[]  = {MetaspaceShared::mc,
       
  2289                                 MetaspaceShared::rw,
       
  2290                                 MetaspaceShared::ro,
       
  2291                                 MetaspaceShared::md};
       
  2292 static int dynamic_regions[] = {MetaspaceShared::rw,
       
  2293                                 MetaspaceShared::ro,
       
  2294                                 MetaspaceShared::mc};
       
  2295 static int static_regions_count  = 4;
       
  2296 static int dynamic_regions_count = 3;
       
  2297 
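The counts above are kept in sync with the initializers by hand. A common idiom, shown only as a sketch here and not applied to the file, derives the count from the array length so the two cannot drift:

#include <cstdio>

// Sketch: compute the count from the initializer instead of hardcoding 4.
// The indices are stand-ins for MetaspaceShared::mc/rw/ro/md.
static int static_regions_sketch[] = {0, 1, 2, 3};
static const int static_regions_count_sketch =
    sizeof(static_regions_sketch) / sizeof(static_regions_sketch[0]);

int main() {
  printf("%d\n", static_regions_count_sketch);  // prints 4
  return 0;
}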
       
  2298 MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
       
  2299   assert(UseSharedSpaces, "must be runtime");
       
  2300   if (mapinfo == NULL) {
       
  2301     return MAP_ARCHIVE_SUCCESS; // no error has happened -- trivially succeeded.
       
  2302   }
       
  2303 
       
  2304   mapinfo->set_is_mapped(false);
       
  2305 
       
  2306   if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
       
  2307     log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
       
  2308                    " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
       
  2309     return MAP_ARCHIVE_OTHER_FAILURE;
       
  2310   }
       
  2311 
       
  2312   MapArchiveResult result = mapinfo->is_static() ?
       
  2313     mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) :
       
  2314     mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs);
       
  2315 
       
  2316   if (result != MAP_ARCHIVE_SUCCESS) {
       
  2317     unmap_archive(mapinfo);
       
  2318     return result;
       
  2319   }
       
  2320 
       
  2321   if (mapinfo->is_static()) {
       
  2322     if (!mapinfo->validate_shared_path_table()) {
       
  2323       unmap_archive(mapinfo);
       
  2324       return MAP_ARCHIVE_OTHER_FAILURE;
       
  2325     }
       
  2326   } else {
       
  2327     if (!DynamicArchive::validate(mapinfo)) {
       
  2328       unmap_archive(mapinfo);
       
  2329       return MAP_ARCHIVE_OTHER_FAILURE;
       
  2330     }
       
  2331   }
       
  2332 
       
  2333   mapinfo->set_is_mapped(true);
       
  2334   return MAP_ARCHIVE_SUCCESS;
       
  2335 }
       
  2336 
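Every failure path in map_archive() above unmaps before returning. The same invariant can be phrased as a scope guard; this is only an illustration of the pattern, not how the HotSpot code is written:

#include <cstdio>

// Illustration of the "unmap on any failure" pattern in map_archive();
// the destructor stands in for unmap_archive(mapinfo).
struct UnmapGuard {
  bool armed = true;
  ~UnmapGuard() { if (armed) puts("unmap on failure path"); }
  void disarm() { armed = false; }
};

static bool map_archive_sketch(bool regions_mapped, bool validated) {
  UnmapGuard guard;
  if (!regions_mapped) return false;   // guard unmaps
  if (!validated)      return false;   // guard unmaps
  guard.disarm();                      // success: keep the mapping
  return true;
}

int main() {
  return map_archive_sketch(true, false) ? 1 : 0;  // exercises a failure path
}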
       
  2337 void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
       
  2338   assert(UseSharedSpaces, "must be runtime");
       
  2339   if (mapinfo != NULL) {
       
  2340     if (mapinfo->is_static()) {
       
  2341       mapinfo->unmap_regions(static_regions, static_regions_count);
       
  2342     } else {
       
  2343       mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count);
       
  2344     }
       
  2345     mapinfo->set_is_mapped(false);
  2019   }
  2346   }
  2020 }
  2347 }
  2021 
  2348 
  2022 // Read the miscellaneous data from the shared file, and
  2349 // Read the miscellaneous data from the shared file, and
  2023 // serialize it out to its various destinations.
  2350 // serialize it out to its various destinations.
  2024 
  2351 
  2025 void MetaspaceShared::initialize_shared_spaces() {
  2352 void MetaspaceShared::initialize_shared_spaces() {
  2026   FileMapInfo *mapinfo = FileMapInfo::current_info();
  2353   FileMapInfo *static_mapinfo = FileMapInfo::current_info();
  2027   _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers();
  2354   _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
  2028   _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size();
  2355   _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
  2029   // _core_spaces_size is loaded from the shared archive immediately after mapping
  2356   char* buffer = static_mapinfo->misc_data_patching_start();
  2030   assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
       
  2031   char* buffer = mapinfo->misc_data_patching_start();
       
  2032   clone_cpp_vtables((intptr_t*)buffer);
  2357   clone_cpp_vtables((intptr_t*)buffer);
  2033 
  2358 
  2034   // Verify various attributes of the archive, plus initialize the
  2359   // Verify various attributes of the archive, plus initialize the
  2035   // shared string/symbol tables
  2360   // shared string/symbol tables
  2036   buffer = mapinfo->serialized_data_start();
  2361   buffer = static_mapinfo->serialized_data_start();
  2037   intptr_t* array = (intptr_t*)buffer;
  2362   intptr_t* array = (intptr_t*)buffer;
  2038   ReadClosure rc(&array);
  2363   ReadClosure rc(&array);
  2039   serialize(&rc);
  2364   serialize(&rc);
  2040 
  2365 
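serialize(&rc) above replays a flat word stream that the dump-time WriteClosure produced. A minimal sketch of the ReadClosure idea (the member name and next() interface are simplified stand-ins for the real closure API):

#include <cstdint>
#include <cstdio>

// Simplified stand-in for ReadClosure: deserialization walks a flat
// intptr_t array, handing out the next word on each call.
struct ReadClosureSketch {
  intptr_t** _ptr;
  explicit ReadClosureSketch(intptr_t** ptr) : _ptr(ptr) {}
  intptr_t next() { return *(*_ptr)++; }
};

int main() {
  intptr_t data[] = {42, 7};       // pretend serialized stream
  intptr_t* array = data;
  ReadClosureSketch rc(&array);
  printf("%ld %ld\n", (long)rc.next(), (long)rc.next());  // 42 7
  return 0;
}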
  2041   // Initialize the run-time symbol table.
  2366   // Initialize the run-time symbol table.
  2042   SymbolTable::create_table();
  2367   SymbolTable::create_table();
  2043 
  2368 
  2044   mapinfo->patch_archived_heap_embedded_pointers();
  2369   static_mapinfo->patch_archived_heap_embedded_pointers();
  2045 
  2370 
  2046   // Close the mapinfo file
  2371   // Close the mapinfo file
  2047   mapinfo->close();
  2372   static_mapinfo->close();
       
  2373 
       
  2374   FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
       
  2375   if (dynamic_mapinfo != NULL) {
       
  2376     intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start();
       
  2377     ReadClosure rc(&buffer);
       
  2378     SymbolTable::serialize_shared_table_header(&rc, false);
       
  2379     SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
       
  2380     dynamic_mapinfo->close();
       
  2381   }
  2048 
  2382 
  2049   if (PrintSharedArchiveAndExit) {
  2383   if (PrintSharedArchiveAndExit) {
  2050     if (PrintSharedDictionary) {
  2384     if (PrintSharedDictionary) {
  2051       tty->print_cr("\nShared classes:\n");
  2385       tty->print_cr("\nShared classes:\n");
  2052       SystemDictionaryShared::print_on(tty);
  2386       SystemDictionaryShared::print_on(tty);
  2053     }
  2387     }
  2054     if (_archive_loading_failed) {
  2388     if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
  2055       tty->print_cr("archive is invalid");
  2389       tty->print_cr("archive is invalid");
  2056       vm_exit(1);
  2390       vm_exit(1);
  2057     } else {
  2391     } else {
  2058       tty->print_cr("archive is valid");
  2392       tty->print_cr("archive is valid");
  2059       vm_exit(0);
  2393       vm_exit(0);
  2092   _md_region.print_out_of_space_msg(name, needed_bytes);
  2426   _md_region.print_out_of_space_msg(name, needed_bytes);
  2093 
  2427 
  2094   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
  2428   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
  2095                                 "Please reduce the number of shared classes.");
  2429                                 "Please reduce the number of shared classes.");
  2096 }
  2430 }
       
  2431 
       
  2432 // This is used to relocate the pointers so that the archive can be mapped at
       
  2433 // Arguments::default_SharedBaseAddress() without runtime relocation.
       
  2434 intx MetaspaceShared::final_delta() {
       
  2435   return intx(Arguments::default_SharedBaseAddress())  // We want the archive to be mapped here at runtime
       
  2436        - intx(SharedBaseAddress);                      // .. but the archive is mapped here at dump time
       
  2437 }
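A worked example of the arithmetic with made-up addresses: if the dump ran with SharedBaseAddress at 0x810000000 and the default base is 0x800000000, final_delta() is -0x10000000; applying it to every archived pointer at dump time lets the archive map at the default base with no runtime relocation:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // Made-up stand-ins for Arguments::default_SharedBaseAddress() and the
  // dump-time SharedBaseAddress.
  int64_t default_base = INT64_C(0x800000000);
  int64_t dump_base    = INT64_C(0x810000000);
  int64_t delta        = default_base - dump_base;  // what final_delta() returns
  int64_t dumped_ptr   = dump_base + 0x1234;        // some archived pointer
  printf("delta=%" PRId64 " relocated=0x%" PRIx64 "\n",
         delta, (uint64_t)(dumped_ptr + delta));    // relocated=0x800001234
  return 0;
}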